Commit 40aa306f authored by David S. Miller

Merge branch 'qed-Status-block-changes'

Yuval Mintz says:

====================
qed: Status block changes

The device maintains a CAM mapping of the internal status blocks
and the various PF/VF MSI-x vector mappings.
During initialization, the driver reads the HW memory and constructs
a shadow SW implementation which it would later use for manipulation
of interrupts. E.g., when enabling VFs and setting their MSI-x tables.

The driver currently has some very strict assumptions on the order the
entries are placed in the CAM. Specifically, it assumes that all entries
belonging to a PF would be consecutive and in-order in the CAM, and that
the VF entries would then follow. But there's no actual HW constraint
enforcing this assumption [although the management firmware does initially
configure it according to the same assumption].

Since the CAM is re-configurable, there are now SW flows employed
by other OSes that might cause the assumption to be invalid.
Such flows allow the PF to forfeit some of its available interrupts
in favor of its VFs, or vice versa.
While those are not employed by qed today, we want to relax the
assumptions as much as we can -
both to allow functionality after PDA and to allow future
compatibility, where the driver would be loaded after a newer one has
'dirtied' the CAM configuration.

In addition to the patches implementing the above relaxation, the series
also contains various cleanups & refactoring of the interrupt logic
[most of which are non-semantic, i.e., no change in behavior].
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7b954ed7 1ee240e3
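The series replaces the per-hwfn sbs_info[] array (removed in the first hunk below) with a per-entry shadow of the IGU CAM: struct qed_igu_block grows status flags, a back-reference index and an sb_info pointer, and free entries are found by scanning rather than by assuming a contiguous range. Below is a minimal, self-contained C model of that bookkeeping, simplified from the hunks that follow; the structure and flag names mirror the diff, but the CAM size, the values in main() and the helper name find_free_sb() are illustrative only.

/* Simplified userspace model of the reworked IGU CAM shadow. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IGU_STATUS_FREE		0x01
#define IGU_STATUS_VALID	0x02
#define IGU_STATUS_PF		0x04
#define IGU_STATUS_DSB		0x08

struct igu_block {
	uint8_t status;		/* VALID / FREE / PF / DSB flags */
	uint8_t vector_number;	/* MSI-X vector within the owning function */
	uint8_t function_id;	/* owning PF or VF */
	uint8_t is_pf;
	uint16_t igu_sb_id;	/* back-reference: index inside the CAM */
	void *sb_info;		/* client SB bound to this entry, if any */
};

/* Mirrors qed_get_igu_free_sb(): no assumption that a function's entries
 * are consecutive - just scan for a valid, free entry of the right kind.
 */
static struct igu_block *find_free_sb(struct igu_block *cam, size_t n,
				      bool want_pf)
{
	for (size_t i = 0; i < n; i++) {
		struct igu_block *p = &cam[i];

		if (!(p->status & IGU_STATUS_VALID) ||
		    !(p->status & IGU_STATUS_FREE))
			continue;
		if (!!(p->status & IGU_STATUS_PF) == want_pf)
			return p;
	}
	return NULL;
}

int main(void)
{
	struct igu_block cam[8] = { 0 };
	struct igu_block *p;

	/* Entry 3 is a valid, free PF entry; everything else is unused. */
	cam[3].status = IGU_STATUS_VALID | IGU_STATUS_FREE | IGU_STATUS_PF;
	cam[3].igu_sb_id = 3;

	p = find_free_sb(cam, 8, true);
	printf("free PF SB at CAM index %u\n",
	       (unsigned)(p ? p->igu_sb_id : 0xffff));
	return 0;
}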
...@@ -495,10 +495,6 @@ struct qed_hwfn { ...@@ -495,10 +495,6 @@ struct qed_hwfn {
bool b_rdma_enabled_in_prs; bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg; u32 rdma_prs_search_reg;
/* Array of sb_info of all status blocks */
struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
struct qed_cxt_mngr *p_cxt_mngr; struct qed_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/ /* Flag indicating whether interrupts are enabled or not*/
......
...@@ -1030,7 +1030,7 @@ void qed_resc_setup(struct qed_dev *cdev) ...@@ -1030,7 +1030,7 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); qed_iov_setup(p_hwfn);
#ifdef CONFIG_QED_LL2 #ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2) if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn); qed_ll2_setup(p_hwfn);
...@@ -1155,7 +1155,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) ...@@ -1155,7 +1155,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
static void qed_init_cau_rt_data(struct qed_dev *cdev) static void qed_init_cau_rt_data(struct qed_dev *cdev)
{ {
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
int i, sb_id; int i, igu_sb_id;
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
...@@ -1165,15 +1165,17 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev) ...@@ -1165,15 +1165,17 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
p_igu_info = p_hwfn->hw_info.p_igu_info; p_igu_info = p_hwfn->hw_info.p_igu_info;
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); for (igu_sb_id = 0;
sb_id++) { igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; p_block = &p_igu_info->entry[igu_sb_id];
if (!p_block->is_pf) if (!p_block->is_pf)
continue; continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry, qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_block->function_id, 0, 0); p_block->function_id, 0, 0);
STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
sb_entry);
} }
} }
} }
...@@ -2036,9 +2038,12 @@ static void get_function_id(struct qed_hwfn *p_hwfn) ...@@ -2036,9 +2038,12 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{ {
u32 *feat_num = p_hwfn->hw_info.feat_num; u32 *feat_num = p_hwfn->hw_info.feat_num;
struct qed_sb_cnt_info sb_cnt_info; struct qed_sb_cnt_info sb_cnt;
u32 non_l2_sbs = 0; u32 non_l2_sbs = 0;
memset(&sb_cnt, 0, sizeof(sb_cnt));
qed_int_get_num_sbs(p_hwfn, &sb_cnt);
if (IS_ENABLED(CONFIG_QED_RDMA) && if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
...@@ -2046,7 +2051,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2046,7 +2051,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
* consideration as to how many l2 queues / cnqs we have. * consideration as to how many l2 queues / cnqs we have.
*/ */
feat_num[QED_RDMA_CNQ] = feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2, min_t(u32, sb_cnt.cnt / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
non_l2_sbs = feat_num[QED_RDMA_CNQ]; non_l2_sbs = feat_num[QED_RDMA_CNQ];
...@@ -2055,14 +2060,11 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2055,14 +2060,11 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
p_hwfn->hw_info.personality == QED_PCI_ETH) { p_hwfn->hw_info.personality == QED_PCI_ETH) {
/* Start by allocating VF queues, then PF's */ /* Start by allocating VF queues, then PF's */
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] = min_t(u32, feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE), RESC_NUM(p_hwfn, QED_L2_QUEUE),
sb_cnt_info.sb_iov_cnt); sb_cnt.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32, feat_num[QED_PF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_SB) - sb_cnt.cnt - non_l2_sbs,
non_l2_sbs,
RESC_NUM(p_hwfn, RESC_NUM(p_hwfn,
QED_L2_QUEUE) - QED_L2_QUEUE) -
FEAT_NUM(p_hwfn, FEAT_NUM(p_hwfn,
...@@ -2070,7 +2072,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2070,7 +2072,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
} }
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB), feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn, RESC_NUM(p_hwfn,
QED_CMDQS_CQS)); QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
...@@ -2080,7 +2082,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2080,7 +2082,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
RESC_NUM(p_hwfn, QED_SB)); (int)sb_cnt.cnt);
} }
const char *qed_hw_get_resc_name(enum qed_resources res_id) const char *qed_hw_get_resc_name(enum qed_resources res_id)
...@@ -2199,7 +2201,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, ...@@ -2199,7 +2201,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
{ {
u8 num_funcs = p_hwfn->num_funcs_on_engine; u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev); bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info;
switch (res_id) { switch (res_id) {
case QED_L2_QUEUE: case QED_L2_QUEUE:
...@@ -2251,9 +2252,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, ...@@ -2251,9 +2252,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = 1; *p_resc_num = 1;
break; break;
case QED_SB: case QED_SB:
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); /* Since we want its value to reflect whether MFW supports
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); * the new scheme, have a default of 0.
*p_resc_num = sb_cnt_info.sb_cnt; */
*p_resc_num = 0;
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -2322,11 +2324,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, ...@@ -2322,11 +2324,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
goto out; goto out;
} }
/* Special handling for status blocks; Would be revised in future */
if (res_id == QED_SB) {
*p_resc_num -= 1;
*p_resc_start -= p_hwfn->enabled_func_idx;
}
out: out:
/* PQs have to divide by 8 [that's the HW granularity]. /* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit. * Reduce number so it would fit.
...@@ -2424,6 +2421,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -2424,6 +2421,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return -EINVAL; return -EINVAL;
} }
/* This will also learn the number of SBs from MFW */
if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
return -EINVAL;
qed_hw_set_feat(p_hwfn); qed_hw_set_feat(p_hwfn);
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
......
...@@ -183,7 +183,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, ...@@ -183,7 +183,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.queue_relative_offset = (u8)tmp; p_data->q_params.queue_relative_offset = (u8)tmp;
for (i = 0; i < fcoe_pf_params->num_cqs; i++) { for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id); u16 igu_sb_id;
igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
tmp = cpu_to_le16(igu_sb_id);
p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
} }
......
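The FCoE hunk above is the first of three call sites (the iSCSI and RDMA hunks further down follow the same pattern) that stop dereferencing the removed p_hwfn->sbs_info[] array and instead translate a zero-based, per-function queue index into an IGU CAM index via qed_get_igu_sb_id(). The point of the indirection is that the resulting CAM indices no longer need to be contiguous. A tiny self-contained toy of that translation; the table contents are made up, only the call pattern reflects the driver:

#include <stdint.h>
#include <stdio.h>

/* Toy vector-id -> IGU CAM index mapping. In the driver, qed_get_igu_sb_id()
 * walks the shadow CAM for the PF entry whose vector_number equals sb_id + 1
 * (vector 0 is reserved for the default/slowpath SB); a fixed table stands in
 * here, deliberately non-contiguous.
 */
static const uint16_t toy_pf_vector_to_igu[] = { 0x20, 0x21, 0x24, 0x30 };

static uint16_t toy_get_igu_sb_id(uint16_t sb_id)
{
	return toy_pf_vector_to_igu[sb_id];
}

int main(void)
{
	for (unsigned i = 0; i < 4; i++)
		printf("queue %u -> IGU SB 0x%04x\n", i,
		       (unsigned)toy_get_igu_sb_id((uint16_t)i));
	return 0;
}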
...@@ -1300,6 +1300,40 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, ...@@ -1300,6 +1300,40 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
} }
static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum qed_coalescing_fsm coalescing_fsm,
u8 timeset)
{
struct cau_pi_entry pi_entry;
u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
else
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done) {
qed_wr(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
*((u32 *)&(pi_entry)));
} else {
STORE_RT_REG(p_hwfn,
CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
*((u32 *)&(pi_entry)));
}
}
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
dma_addr_t sb_phys, dma_addr_t sb_phys,
...@@ -1366,40 +1400,6 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, ...@@ -1366,40 +1400,6 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
} }
} }
void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum qed_coalescing_fsm coalescing_fsm,
u8 timeset)
{
struct cau_pi_entry pi_entry;
u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
else
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done) {
qed_wr(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
*((u32 *)&(pi_entry)));
} else {
STORE_RT_REG(p_hwfn,
CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
*((u32 *)&(pi_entry)));
}
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn, void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{ {
...@@ -1412,16 +1412,47 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, ...@@ -1412,16 +1412,47 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->igu_sb_id, 0, 0); sb_info->igu_sb_id, 0, 0);
} }
/** struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
* @brief qed_get_igu_sb_id - given a sw sb_id return the {
* igu_sb_id struct qed_igu_block *p_block;
* u16 igu_id;
* @param p_hwfn
* @param sb_id for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
* igu_id++) {
* @return u16 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
*/
static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) if (!(p_block->status & QED_IGU_STATUS_VALID) ||
!(p_block->status & QED_IGU_STATUS_FREE))
continue;
if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
return p_block;
}
return NULL;
}
static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
struct qed_igu_block *p_block;
u16 igu_id;
for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
igu_id++) {
p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
if (!(p_block->status & QED_IGU_STATUS_VALID) ||
!p_block->is_pf ||
p_block->vector_number != vector_id)
continue;
return igu_id;
}
return QED_SB_INVALID_IDX;
}
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{ {
u16 igu_sb_id; u16 igu_sb_id;
...@@ -1429,7 +1460,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) ...@@ -1429,7 +1460,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
if (sb_id == QED_SP_SB_ID) if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else if (IS_PF(p_hwfn->cdev)) else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
else else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
...@@ -1454,8 +1485,19 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, ...@@ -1454,8 +1485,19 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id != QED_SP_SB_ID) { if (sb_id != QED_SP_SB_ID) {
p_hwfn->sbs_info[sb_id] = sb_info; if (IS_PF(p_hwfn->cdev)) {
p_hwfn->num_sbs++; struct qed_igu_info *p_info;
struct qed_igu_block *p_block;
p_info = p_hwfn->hw_info.p_igu_info;
p_block = &p_info->entry[sb_info->igu_sb_id];
p_block->sb_info = sb_info;
p_block->status &= ~QED_IGU_STATUS_FREE;
p_info->usage.free_cnt--;
} else {
qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
}
} }
sb_info->cdev = p_hwfn->cdev; sb_info->cdev = p_hwfn->cdev;
...@@ -1484,20 +1526,35 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, ...@@ -1484,20 +1526,35 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
int qed_int_sb_release(struct qed_hwfn *p_hwfn, int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info, u16 sb_id) struct qed_sb_info *sb_info, u16 sb_id)
{ {
if (sb_id == QED_SP_SB_ID) { struct qed_igu_block *p_block;
DP_ERR(p_hwfn, "Do Not free sp sb using this function"); struct qed_igu_info *p_info;
return -EINVAL;
} if (!sb_info)
return 0;
/* zero status block and ack counter */ /* zero status block and ack counter */
sb_info->sb_ack = 0; sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
if (p_hwfn->sbs_info[sb_id] != NULL) { if (IS_VF(p_hwfn->cdev)) {
p_hwfn->sbs_info[sb_id] = NULL; qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
p_hwfn->num_sbs--; return 0;
}
p_info = p_hwfn->hw_info.p_igu_info;
p_block = &p_info->entry[sb_info->igu_sb_id];
/* Vector 0 is reserved to Default SB */
if (!p_block->vector_number) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
return -EINVAL;
} }
/* Lose reference to client's SB info, and fix counters */
p_block->sb_info = NULL;
p_block->status |= QED_IGU_STATUS_FREE;
p_info->usage.free_cnt++;
return 0; return 0;
} }
...@@ -1616,10 +1673,9 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, ...@@ -1616,10 +1673,9 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
} }
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
enum qed_int_mode int_mode) struct qed_ptt *p_ptt)
{ {
int rc = 0;
/* Configure AEU signal change to produce attentions */ /* Configure AEU signal change to produce attentions */
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
...@@ -1632,6 +1688,16 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, ...@@ -1632,6 +1688,16 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
/* Unmask AEU signals toward IGU */ /* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
int rc = 0;
qed_int_igu_enable_attn(p_hwfn, p_ptt);
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn); rc = qed_slowpath_irq_req(p_hwfn);
if (rc) { if (rc) {
...@@ -1660,10 +1726,11 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1660,10 +1726,11 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
#define IGU_CLEANUP_SLEEP_LENGTH (1000) #define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 sb_id, bool cleanup_set, u16 opaque_fid) u16 igu_sb_id,
bool cleanup_set, u16 opaque_fid)
{ {
u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
/* Set the data field */ /* Set the data field */
...@@ -1686,8 +1753,8 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, ...@@ -1686,8 +1753,8 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
mmiowb(); mmiowb();
/* calculate where to read the status bit from */ /* calculate where to read the status bit from */
sb_bit = 1 << (sb_id % 32); sb_bit = 1 << (igu_sb_id % 32);
sb_bit_addr = sb_id / 32 * sizeof(u32); sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
...@@ -1704,29 +1771,38 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, ...@@ -1704,29 +1771,38 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
if (!sleep_cnt) if (!sleep_cnt)
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Timeout waiting for clear status 0x%08x [for sb %d]\n", "Timeout waiting for clear status 0x%08x [for sb %d]\n",
val, sb_id); val, igu_sb_id);
} }
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 sb_id, u16 opaque, bool b_set) u16 igu_sb_id, u16 opaque, bool b_set)
{ {
struct qed_igu_block *p_block;
int pi, i; int pi, i;
p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
igu_sb_id,
p_block->function_id,
p_block->is_pf, p_block->vector_number);
/* Set */ /* Set */
if (b_set) if (b_set)
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
/* Clear */ /* Clear */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
/* Wait for the IGU SB to cleanup */ /* Wait for the IGU SB to cleanup */
for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
u32 val; u32 val;
val = qed_rd(p_hwfn, p_ptt, val = qed_rd(p_hwfn, p_ptt,
IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); IGU_REG_WRITE_DONE_PENDING +
if (val & (1 << (sb_id % 32))) ((igu_sb_id / 32) * 4));
if (val & BIT((igu_sb_id % 32)))
usleep_range(10, 20); usleep_range(10, 20);
else else
break; break;
...@@ -1734,84 +1810,205 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, ...@@ -1734,84 +1810,205 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
if (i == IGU_CLEANUP_SLEEP_LENGTH) if (i == IGU_CLEANUP_SLEEP_LENGTH)
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
sb_id); igu_sb_id);
/* Clear the CAU for the SB */ /* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++) for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt, qed_wr(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
} }
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
bool b_set, bool b_slowpath) bool b_set, bool b_slowpath)
{ {
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; struct qed_igu_block *p_block;
u32 sb_id = 0, val = 0; u16 igu_sb_id = 0;
u32 val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, for (igu_sb_id = 0;
"IGU cleaning SBs [%d,...,%d]\n", igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
igu_base_sb, igu_base_sb + igu_sb_cnt - 1); p_block = &p_info->entry[igu_sb_id];
for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) if (!(p_block->status & QED_IGU_STATUS_VALID) ||
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, !p_block->is_pf ||
(p_block->status & QED_IGU_STATUS_DSB))
continue;
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
p_hwfn->hw_info.opaque_fid, p_hwfn->hw_info.opaque_fid,
b_set); b_set);
}
if (!b_slowpath) if (b_slowpath)
return; qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
p_info->igu_dsb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
}
sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, {
"IGU cleaning slowpath SB [%d]\n", sb_id); struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, struct qed_igu_block *p_block;
p_hwfn->hw_info.opaque_fid, b_set); int pf_sbs, vf_sbs;
u16 igu_sb_id;
u32 val, rval;
if (!RESC_NUM(p_hwfn, QED_SB)) {
p_info->b_allow_pf_vf_change = false;
} else {
/* Use the numbers the MFW have provided -
* don't forget MFW accounts for the default SB as well.
*/
p_info->b_allow_pf_vf_change = true;
if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
DP_INFO(p_hwfn,
"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
RESC_NUM(p_hwfn, QED_SB) - 1,
p_info->usage.cnt);
p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
}
if (IS_PF_SRIOV(p_hwfn)) {
u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
if (vfs != p_info->usage.iov_cnt)
DP_VERBOSE(p_hwfn,
NETIF_MSG_INTR,
"0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
p_info->usage.iov_cnt, vfs);
/* At this point we know how many SBs we have totally
* in IGU + number of PF SBs. So we can validate that
* we'd have sufficient for VF.
*/
if (vfs > p_info->usage.free_cnt +
p_info->usage.free_cnt_iov - p_info->usage.cnt) {
DP_NOTICE(p_hwfn,
"Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
p_info->usage.free_cnt +
p_info->usage.free_cnt_iov,
p_info->usage.cnt, vfs);
return -EINVAL;
}
/* Currently cap the number of VFs SBs by the
* number of VFs.
*/
p_info->usage.iov_cnt = vfs;
}
}
/* Mark all SBs as free, now in the right PF/VFs division */
p_info->usage.free_cnt = p_info->usage.cnt;
p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
p_info->usage.orig = p_info->usage.cnt;
p_info->usage.iov_orig = p_info->usage.iov_cnt;
/* We now proceed to re-configure the IGU cam to reflect the initial
* configuration. We can start with the Default SB.
*/
pf_sbs = p_info->usage.cnt;
vf_sbs = p_info->usage.iov_cnt;
for (igu_sb_id = p_info->igu_dsb_id;
igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
p_block = &p_info->entry[igu_sb_id];
val = 0;
if (!(p_block->status & QED_IGU_STATUS_VALID))
continue;
if (p_block->status & QED_IGU_STATUS_DSB) {
p_block->function_id = p_hwfn->rel_pf_id;
p_block->is_pf = 1;
p_block->vector_number = 0;
p_block->status = QED_IGU_STATUS_VALID |
QED_IGU_STATUS_PF |
QED_IGU_STATUS_DSB;
} else if (pf_sbs) {
pf_sbs--;
p_block->function_id = p_hwfn->rel_pf_id;
p_block->is_pf = 1;
p_block->vector_number = p_info->usage.cnt - pf_sbs;
p_block->status = QED_IGU_STATUS_VALID |
QED_IGU_STATUS_PF |
QED_IGU_STATUS_FREE;
} else if (vf_sbs) {
p_block->function_id =
p_hwfn->cdev->p_iov_info->first_vf_in_pf +
p_info->usage.iov_cnt - vf_sbs;
p_block->is_pf = 0;
p_block->vector_number = 0;
p_block->status = QED_IGU_STATUS_VALID |
QED_IGU_STATUS_FREE;
vf_sbs--;
} else {
p_block->function_id = 0;
p_block->is_pf = 0;
p_block->vector_number = 0;
}
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
p_block->function_id);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
p_block->vector_number);
/* VF entries would be enabled when VF is initializaed */
SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
rval = qed_rd(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
if (rval != val) {
qed_wr(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY +
sizeof(u32) * igu_sb_id, val);
DP_VERBOSE(p_hwfn,
NETIF_MSG_INTR,
"IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
igu_sb_id,
p_block->function_id,
p_block->is_pf,
p_block->vector_number, rval, val);
}
}
return 0;
} }
static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 sb_id) struct qed_ptt *p_ptt, u16 igu_sb_id)
{ {
u32 val = qed_rd(p_hwfn, p_ptt, u32 val = qed_rd(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
struct qed_igu_block *p_block; struct qed_igu_block *p_block;
p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
/* stop scanning when hit first invalid PF entry */
if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
goto out;
/* Fill the block information */ /* Fill the block information */
p_block->status = QED_IGU_STATUS_VALID; p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
p_block->function_id = GET_FIELD(val,
IGU_MAPPING_LINE_FUNCTION_NUMBER);
p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
p_block->vector_number = GET_FIELD(val, p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
IGU_MAPPING_LINE_VECTOR_NUMBER); p_block->igu_sb_id = igu_sb_id;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
sb_id, val, p_block->function_id,
p_block->is_pf, p_block->vector_number);
out:
return val;
} }
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ {
struct qed_igu_info *p_igu_info; struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0; struct qed_igu_block *p_block;
u16 sb_id, last_iov_sb_id = 0; u32 min_vf = 0, max_vf = 0;
struct qed_igu_block *blk; u16 igu_sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
if (!p_hwfn->hw_info.p_igu_info) if (!p_hwfn->hw_info.p_igu_info)
...@@ -1819,12 +2016,10 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1819,12 +2016,10 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_igu_info = p_hwfn->hw_info.p_igu_info; p_igu_info = p_hwfn->hw_info.p_igu_info;
/* Initialize base sb / sb cnt for PFs and VFs */ /* Distinguish between existent and non-existent default SB */
p_igu_info->igu_base_sb = 0xffff; p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
/* Find the range of VF ids whose SB belong to this PF */
if (p_hwfn->cdev->p_iov_info) { if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
...@@ -1832,113 +2027,69 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1832,113 +2027,69 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
} }
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); for (igu_sb_id = 0;
sb_id++) { igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id]; /* Read current entry; Notice it might not belong to this PF */
qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); p_block = &p_igu_info->entry[igu_sb_id];
/* stop scanning when hit first invalid PF entry */
if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
break;
if (blk->is_pf) { if ((p_block->is_pf) &&
if (blk->function_id == p_hwfn->rel_pf_id) { (p_block->function_id == p_hwfn->rel_pf_id)) {
blk->status |= QED_IGU_STATUS_PF; p_block->status = QED_IGU_STATUS_PF |
QED_IGU_STATUS_VALID |
QED_IGU_STATUS_FREE;
if (blk->vector_number == 0) { if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
if (p_igu_info->igu_dsb_id == 0xffff) p_igu_info->usage.cnt++;
p_igu_info->igu_dsb_id = sb_id; } else if (!(p_block->is_pf) &&
} else { (p_block->function_id >= min_vf) &&
if (p_igu_info->igu_base_sb == (p_block->function_id < max_vf)) {
0xffff) {
p_igu_info->igu_base_sb = sb_id;
} else if (prev_sb_id != sb_id - 1) {
DP_NOTICE(p_hwfn->cdev,
"consecutive igu vectors for HWFN %x broken",
p_hwfn->rel_pf_id);
break;
}
prev_sb_id = sb_id;
/* we don't count the default */
(p_igu_info->igu_sb_cnt)++;
}
}
} else {
if ((blk->function_id >= min_vf) &&
(blk->function_id < max_vf)) {
/* Available for VFs of this PF */ /* Available for VFs of this PF */
if (p_igu_info->igu_base_sb_iov == 0xffff) { p_block->status = QED_IGU_STATUS_VALID |
p_igu_info->igu_base_sb_iov = sb_id; QED_IGU_STATUS_FREE;
} else if (last_iov_sb_id != sb_id - 1) {
if (!val) { if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
DP_VERBOSE(p_hwfn->cdev, p_igu_info->usage.iov_cnt++;
NETIF_MSG_INTR,
"First uninitialized IGU CAM entry at index 0x%04x\n",
sb_id);
} else {
DP_NOTICE(p_hwfn->cdev,
"Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
p_hwfn->rel_pf_id,
last_iov_sb_id,
sb_id); }
break;
}
blk->status |= QED_IGU_STATUS_FREE;
p_hwfn->hw_info.p_igu_info->free_blks++;
last_iov_sb_id = sb_id;
}
}
} }
/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect /* Mark the First entry belonging to the PF or its VFs
* the number of VF SBs [especially for first VF on engine, as we can't * as the default SB [we'll reset IGU prior to first usage].
* differentiate between empty entries and its entries].
* Since we don't really support more SBs than VFs today, prevent any
* such configuration by sanitizing the number of SBs to equal the
* number of VFs.
*/ */
if (IS_PF_SRIOV(p_hwfn)) { if ((p_block->status & QED_IGU_STATUS_VALID) &&
u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs; (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
p_igu_info->igu_dsb_id = igu_sb_id;
p_block->status |= QED_IGU_STATUS_DSB;
}
if (total_vfs < p_igu_info->free_blks) { /* limit number of prints by having each PF print only its
DP_VERBOSE(p_hwfn, * entries with the exception of PF0 which would print
(NETIF_MSG_INTR | QED_MSG_IOV), * everything.
"Limiting number of SBs for IOV - %04x --> %04x\n", */
p_igu_info->free_blks, if ((p_block->status & QED_IGU_STATUS_VALID) ||
p_hwfn->cdev->p_iov_info->total_vfs); (p_hwfn->abs_pf_id == 0)) {
p_igu_info->free_blks = total_vfs; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
} else if (total_vfs > p_igu_info->free_blks) { "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
DP_NOTICE(p_hwfn, igu_sb_id, p_block->function_id,
"IGU has only %04x SBs for VFs while the device has %04x VFs\n", p_block->is_pf, p_block->vector_number);
p_igu_info->free_blks, total_vfs);
return -EINVAL;
} }
} }
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE( if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
p_hwfn,
NETIF_MSG_INTR,
"IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_base_sb_iov,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_sb_cnt_iov,
p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
p_igu_info->igu_sb_cnt == 0) {
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_dsb_id); p_igu_info->igu_dsb_id);
return -EINVAL; return -EINVAL;
} }
/* All non default SB are considered free at this point */
p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
p_igu_info->igu_dsb_id,
p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
return 0; return 0;
} }
...@@ -2035,31 +2186,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, ...@@ -2035,31 +2186,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
if (!info || !p_sb_cnt_info) if (!info || !p_sb_cnt_info)
return; return;
p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
/* Determine origin of SB id */
if ((sb_id >= p_info->igu_base_sb) &&
(sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
/* We want the first VF queue to be adjacent to the
* last PF queue. Since L2 queues can be partial to
* SBs, we'll use the feature instead.
*/
return sb_id - p_info->igu_base_sb_iov +
FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
} else {
DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
return 0;
}
} }
void qed_int_disable_post_isr_release(struct qed_dev *cdev) void qed_int_disable_post_isr_release(struct qed_dev *cdev)
......
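The centerpiece of the qed_int.c changes above is qed_int_igu_reset_cam(): once qed_hw_get_resc() has learned the SB budget from the MFW, it walks the whole CAM starting at the default SB, lays entries out as the DSB, then PF vectors 1..cnt, then VF entries, leaves the remainder invalid, and rewrites a CAM line only when the readback differs. A small self-contained sketch of just that layout arithmetic; the counts and indices passed in main() are made up for illustration, and the real function additionally skips entries that were never valid:

#include <stdio.h>

/* Toy re-partitioning walk mirroring qed_int_igu_reset_cam(): the walk starts
 * at the default SB, the next pf_cnt entries get PF vector numbers 1..pf_cnt,
 * the next vf_cnt entries go to this PF's VFs, and the rest stay invalid.
 */
static void plan_cam(unsigned dsb_id, unsigned cam_size,
		     unsigned pf_cnt, unsigned vf_cnt, unsigned first_vf)
{
	unsigned pf_left = pf_cnt, vf_left = vf_cnt;

	for (unsigned id = dsb_id; id < cam_size; id++) {
		if (id == dsb_id) {
			printf("CAM[%u]: PF default SB (vector 0)\n", id);
		} else if (pf_left) {
			pf_left--;
			printf("CAM[%u]: PF vector %u\n", id, pf_cnt - pf_left);
		} else if (vf_left) {
			vf_left--;
			printf("CAM[%u]: VF %u\n",
			       id, first_vf + vf_cnt - vf_left - 1);
		} else {
			printf("CAM[%u]: invalid\n", id);
		}
	}
}

int main(void)
{
	/* 1 default SB + 4 PF SBs + 6 VF SBs, one spare entry left invalid. */
	plan_cam(0, 12, 4, 6, 16);
	return 0;
}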
...@@ -78,24 +78,6 @@ enum qed_coalescing_fsm { ...@@ -78,24 +78,6 @@ enum qed_coalescing_fsm {
QED_COAL_TX_STATE_MACHINE QED_COAL_TX_STATE_MACHINE
}; };
/**
* @brief qed_int_cau_conf_pi - configure cau for a given
* status block
*
* @param p_hwfn
* @param p_ptt
* @param igu_sb_id
* @param pi_index
* @param state
* @param timeset
*/
void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum qed_coalescing_fsm coalescing_fsm,
u8 timeset);
/** /**
* @brief qed_int_igu_enable_int - enable device interrupts * @brief qed_int_igu_enable_int - enable device interrupts
* *
...@@ -217,32 +199,63 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); ...@@ -217,32 +199,63 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define SB_ALIGNED_SIZE(p_hwfn) \ #define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
struct qed_igu_block { struct qed_igu_block {
u8 status; u8 status;
#define QED_IGU_STATUS_FREE 0x01 #define QED_IGU_STATUS_FREE 0x01
#define QED_IGU_STATUS_VALID 0x02 #define QED_IGU_STATUS_VALID 0x02
#define QED_IGU_STATUS_PF 0x04 #define QED_IGU_STATUS_PF 0x04
#define QED_IGU_STATUS_DSB 0x08
u8 vector_number; u8 vector_number;
u8 function_id; u8 function_id;
u8 is_pf; u8 is_pf;
};
struct qed_igu_map { /* Index inside IGU [meant for back reference] */
struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH]; u16 igu_sb_id;
struct qed_sb_info *sb_info;
}; };
struct qed_igu_info { struct qed_igu_info {
struct qed_igu_map igu_map; struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
u16 igu_dsb_id; u16 igu_dsb_id;
u16 igu_base_sb;
u16 igu_base_sb_iov; struct qed_sb_cnt_info usage;
u16 igu_sb_cnt;
u16 igu_sb_cnt_iov; bool b_allow_pf_vf_change;
u16 free_blks;
}; };
/* TODO Names of function may change... */ /**
* @brief - Make sure the IGU CAM reflects the resources provided by MFW
*
* @param p_hwfn
* @param p_ptt
*/
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Translate the weakly-defined client sb-id into an IGU sb-id
*
* @param p_hwfn
* @param sb_id - user provided sb_id
*
* @return an index inside IGU CAM where the SB resides
*/
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief return a pointer to an unused valid SB
*
* @param p_hwfn
* @param b_is_pf - true iff we want a SB belonging to a PF
*
* @return point to an igu_block, NULL if none is available
*/
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
bool b_is_pf);
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
bool b_set, bool b_set,
...@@ -321,13 +334,13 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); ...@@ -321,13 +334,13 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
* *
* @param p_hwfn * @param p_hwfn
* @param p_ptt * @param p_ptt
* @param sb_id - igu status block id * @param igu_sb_id - igu status block id
* @param opaque - opaque fid of the sb owner. * @param opaque - opaque fid of the sb owner.
* @param b_set - set(1) / clear(0) * @param b_set - set(1) / clear(0)
*/ */
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 sb_id, u16 igu_sb_id,
u16 opaque, u16 opaque,
bool b_set); bool b_set);
...@@ -376,16 +389,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn); ...@@ -376,16 +389,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn);
void qed_int_setup(struct qed_hwfn *p_hwfn, void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt); struct qed_ptt *p_ptt);
/**
* @brief - Returns an Rx queue index appropriate for usage with given SB.
*
* @param p_hwfn
* @param sb_id - absolute index of SB
*
* @return index of Rx queue
*/
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/** /**
* @brief - Enable Interrupt & Attention for hw function * @brief - Enable Interrupt & Attention for hw function
* *
......
...@@ -220,7 +220,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, ...@@ -220,7 +220,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
for (i = 0; i < p_params->num_queues; i++) { for (i = 0; i < p_params->num_queues; i++) {
val = p_hwfn->sbs_info[i]->igu_sb_id; val = qed_get_igu_sb_id(p_hwfn, i);
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
} }
......
...@@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
cdev->int_params.in.num_vectors++; /* slowpath */ cdev->int_params.in.num_vectors++; /* slowpath */
} }
......
...@@ -581,6 +581,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, ...@@ -581,6 +581,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent; struct qed_spq_entry *p_ent;
u32 cnq_id, sb_id; u32 cnq_id, sb_id;
u16 igu_sb_id;
int rc; int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
...@@ -612,10 +613,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, ...@@ -612,10 +613,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
p_cnq_params = &p_ramrod->cnq_params[cnq_id]; p_cnq_params = &p_ramrod->cnq_params[cnq_id];
p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id]; p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
p_cnq_params->sb_num =
cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
......
...@@ -378,33 +378,6 @@ static int qed_iov_pci_cfg_info(struct qed_dev *cdev) ...@@ -378,33 +378,6 @@ static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
return 0; return 0;
} }
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_block *p_sb;
u16 sb_id;
u32 val;
if (!p_hwfn->hw_info.p_igu_info) {
DP_ERR(p_hwfn,
"qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
return;
}
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
if ((p_sb->status & QED_IGU_STATUS_FREE) &&
!(p_sb->status & QED_IGU_STATUS_PF)) {
val = qed_rd(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sb_id * 4);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
qed_wr(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
}
}
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{ {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
...@@ -555,13 +528,12 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn) ...@@ -555,13 +528,12 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
return qed_iov_allocate_vfdb(p_hwfn); return qed_iov_allocate_vfdb(p_hwfn);
} }
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) void qed_iov_setup(struct qed_hwfn *p_hwfn)
{ {
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return; return;
qed_iov_setup_vfdb(p_hwfn); qed_iov_setup_vfdb(p_hwfn);
qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
} }
void qed_iov_free(struct qed_hwfn *p_hwfn) void qed_iov_free(struct qed_hwfn *p_hwfn)
...@@ -868,45 +840,36 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, ...@@ -868,45 +840,36 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u16 num_rx_queues) struct qed_vf_info *vf, u16 num_rx_queues)
{ {
struct qed_igu_block *igu_blocks; struct qed_igu_block *p_block;
int qid = 0, igu_id = 0; struct cau_sb_entry sb_entry;
int qid = 0;
u32 val = 0; u32 val = 0;
igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
while ((qid < num_rx_queues) && for (qid = 0; qid < num_rx_queues; qid++) {
(igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { p_block = qed_get_igu_free_sb(p_hwfn, false);
if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { vf->igu_sbs[qid] = p_block->igu_sb_id;
struct cau_sb_entry sb_entry; p_block->status &= ~QED_IGU_STATUS_FREE;
vf->igu_sbs[qid] = (u16)igu_id;
igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
qed_wr(p_hwfn, p_ptt, qed_wr(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, IGU_REG_MAPPING_MEMORY +
val); sizeof(u32) * p_block->igu_sb_id, val);
/* Configure igu sb in CAU which were marked valid */ /* Configure igu sb in CAU which were marked valid */
qed_init_cau_sb_entry(p_hwfn, &sb_entry, qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_hwfn->rel_pf_id, p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
vf->abs_vf_id, 1);
qed_dmae_host2grc(p_hwfn, p_ptt, qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)&sb_entry, (u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY + CAU_REG_SB_VAR_MEMORY +
igu_id * sizeof(u64), 2, 0); p_block->igu_sb_id * sizeof(u64), 2, 0);
qid++;
}
igu_id++;
} }
vf->num_sbs = (u8) num_rx_queues; vf->num_sbs = (u8) num_rx_queues;
...@@ -931,10 +894,8 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, ...@@ -931,10 +894,8 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
qed_wr(p_hwfn, p_ptt, addr, val); qed_wr(p_hwfn, p_ptt, addr, val);
p_info->igu_map.igu_blocks[igu_id].status |= p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
QED_IGU_STATUS_FREE; p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
p_hwfn->hw_info.p_igu_info->free_blks++;
} }
vf->num_sbs = 0; vf->num_sbs = 0;
......
...@@ -316,9 +316,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn); ...@@ -316,9 +316,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn);
* @brief qed_iov_setup - setup sriov related resources * @brief qed_iov_setup - setup sriov related resources
* *
* @param p_hwfn * @param p_hwfn
* @param p_ptt
*/ */
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_iov_setup(struct qed_hwfn *p_hwfn);
/** /**
* @brief qed_iov_free - free sriov related resources * @brief qed_iov_free - free sriov related resources
...@@ -397,7 +396,7 @@ static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn) ...@@ -397,7 +396,7 @@ static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
return 0; return 0;
} }
static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{ {
} }
......
...@@ -792,9 +792,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, ...@@ -792,9 +792,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
req->only_untagged = only_untagged; req->only_untagged = only_untagged;
/* status blocks */ /* status blocks */
for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
if (p_hwfn->sbs_info[i]) struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
if (p_sb)
req->sb_addr[i] = p_sb->sb_phys;
}
/* add list termination tlv */ /* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset, qed_add_tlv(p_hwfn, &p_iov->offset,
...@@ -1240,6 +1243,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) ...@@ -1240,6 +1243,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
} }
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
if (!p_iov) {
DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
return;
}
if (sb_id >= PFVF_MAX_SBS_PER_VF) {
DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
return;
}
p_iov->sbs_info[sb_id] = p_sb;
}
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
......
...@@ -627,6 +627,14 @@ struct qed_vf_iov { ...@@ -627,6 +627,14 @@ struct qed_vf_iov {
* this has to be propagated as it affects the fastpath. * this has to be propagated as it affects the fastpath.
*/ */
bool b_pre_fp_hsi; bool b_pre_fp_hsi;
/* Current day VFs are passing the SBs physical address on vport
* start, and as they lack an IGU mapping they need to store the
* addresses of previously registered SBs.
* Even if we were to change configuration flow, due to backward
* compatibility [with older PFs] we'd still need to store these.
*/
struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
}; };
#ifdef CONFIG_QED_SRIOV #ifdef CONFIG_QED_SRIOV
...@@ -836,6 +844,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); ...@@ -836,6 +844,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
*/ */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief Stores [or removes] a configured sb_info.
*
* @param p_hwfn
* @param sb_id - zero-based SB index [for fastpath]
* @param sb_info - may be NULL [during removal].
*/
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb);
/** /**
* @brief qed_vf_pf_vport_start - perform vport start for VF. * @brief qed_vf_pf_vport_start - perform vport start for VF.
* *
......
...@@ -886,9 +886,15 @@ struct qed_eth_stats { ...@@ -886,9 +886,15 @@ struct qed_eth_stats {
#define TX_PI(tc) (RX_PI + 1 + tc) #define TX_PI(tc) (RX_PI + 1 + tc)
struct qed_sb_cnt_info { struct qed_sb_cnt_info {
int sb_cnt; /* Original, current, and free SBs for PF */
int sb_iov_cnt; int orig;
int sb_free_blk; int cnt;
int free_cnt;
/* Original, current and free SBS for child VFs */
int iov_orig;
int iov_cnt;
int free_cnt_iov;
}; };
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
......
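Finally, the qed_if.h hunk above replaces the old sb_cnt/sb_iov_cnt/sb_free_blk trio with explicit original/current/free counters for the PF and for its VFs, and qed_int_get_num_sbs() now simply copies p_igu_info->usage into it. A self-contained toy of the caller pattern (cf. qed_hw_set_feat() and qed_slowpath_setup_int() earlier in the diff); the stub values are invented, only the field names and the zero-then-query idiom come from the patches:

#include <stdio.h>
#include <string.h>

struct qed_sb_cnt_info {
	int orig, cnt, free_cnt;		/* PF SBs: granted, current, free */
	int iov_orig, iov_cnt, free_cnt_iov;	/* same, for this PF's VFs */
};

/* Stand-in for qed_int_get_num_sbs(), which now memcpy()s the IGU usage. */
static void toy_get_num_sbs(struct qed_sb_cnt_info *p)
{
	p->orig = p->cnt = p->free_cnt = 16;
	p->iov_orig = p->iov_cnt = p->free_cnt_iov = 8;
}

int main(void)
{
	struct qed_sb_cnt_info sb_cnt;

	/* Zero the struct, query once, then size PF features from .cnt and
	 * VF features from .iov_cnt instead of RESC_NUM(p_hwfn, QED_SB).
	 */
	memset(&sb_cnt, 0, sizeof(sb_cnt));
	toy_get_num_sbs(&sb_cnt);

	printf("PF fastpath SBs: %d, VF SBs: %d\n", sb_cnt.cnt, sb_cnt.iov_cnt);
	return 0;
}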