Commit fc48b7a6 authored by Yuval Mintz, committed by David S. Miller

qed/qede: use 8.7.3.0 FW.

This patch moves the qed* drivers to the 8.7.3.0 FW.
The new FW is required for a number of new SW features, including:
  - Vlan filtering offload
  - Encapsulation offload support
  - HW ingress aggregations
It also paves the way for adding storage protocols in the future.

V2:
 - Fix kbuild test robot errors/warnings.
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7530e44c
@@ -70,8 +70,8 @@ struct qed_sb_sp_info;
 struct qed_mcp_info;
 
 struct qed_rt_data {
-        u32 init_val;
-        bool b_valid;
+        u32 *init_val;
+        bool *b_valid;
 };
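
The qed_rt_data change above is an array-of-structs to struct-of-arrays conversion: instead of one {value, valid} pair per runtime slot, the driver now keeps two parallel arrays. Keeping all init_val words contiguous is what later lets qed_init_rt() hand a whole run of valid values to the DMA engine in one transaction. A minimal standalone illustration of the two layouts (plain C; the fixed size is made up, the driver allocates these dynamically):

#include <stdbool.h>
#include <stdint.h>

#define RUNTIME_ARRAY_SIZE 64 /* made-up size for illustration */

/* Old layout: value and valid flag interleaved per entry. */
struct rt_entry {
        uint32_t init_val;
        bool b_valid;
};
static struct rt_entry rt_old[RUNTIME_ARRAY_SIZE];

/* New layout: two parallel arrays. init_val[] is a contiguous block of
 * u32s that can be DMA-ed as-is; b_valid[] is consulted separately. */
struct rt_data {
        uint32_t init_val[RUNTIME_ARRAY_SIZE];
        bool b_valid[RUNTIME_ARRAY_SIZE];
};
static struct rt_data rt_new;
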
 /* The PCI personality is not quite synonymous to protocol ID:
@@ -120,6 +120,10 @@ enum QED_PORT_MODE {
         QED_PORT_MODE_DE_1X25G
 };
 
+enum qed_dev_cap {
+        QED_DEV_CAP_ETH,
+};
+
 struct qed_hw_info {
         /* PCI personality */
         enum qed_pci_personality personality;
@@ -151,6 +155,7 @@ struct qed_hw_info {
         u32 port_mode;
         u32 hw_mode;
+        unsigned long device_capabilities;
 };
 
 struct qed_hw_cid_data {
@@ -267,7 +272,7 @@ struct qed_hwfn {
         struct qed_hw_info hw_info;
 
         /* rt_array (for init-tool) */
-        struct qed_rt_data *rt_data;
+        struct qed_rt_data rt_data;
 
         /* SPQ */
         struct qed_spq *p_spq;
@@ -350,9 +355,20 @@ struct qed_dev {
         char name[NAME_SIZE];
 
         u8 type;
-#define QED_DEV_TYPE_BB_A0      (0 << 0)
-#define QED_DEV_TYPE_MASK       (0x3)
-#define QED_DEV_TYPE_SHIFT      (0)
+#define QED_DEV_TYPE_BB         (0 << 0)
+#define QED_DEV_TYPE_AH         BIT(0)
+/* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev)          ((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
+                                 CHIP_REV_IS_A0(dev))
+#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
+                                 CHIP_REV_IS_B0(dev))
+#define QED_GET_TYPE(dev)       (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+                                 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+        u16 vendor_id;
+        u16 device_id;
+
         u16 chip_num;
 #define CHIP_NUM_MASK   0xffff
@@ -361,6 +377,8 @@ struct qed_dev {
         u16 chip_rev;
 #define CHIP_REV_MASK           0xf
 #define CHIP_REV_SHIFT          12
+#define CHIP_REV_IS_A0(_cdev)   (!(_cdev)->chip_rev)
+#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)
         u16 chip_metal;
 #define CHIP_METAL_MASK         0xff
@@ -375,10 +393,10 @@ struct qed_dev {
         u8 num_funcs_in_port;
 
         u8 path_id;
-        enum mf_mode mf_mode;
-#define IS_MF(_p_hwfn)          (((_p_hwfn)->cdev)->mf_mode != SF)
-#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
-#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+        enum qed_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
 
         int pcie_width;
         int pcie_speed;
@@ -441,11 +459,6 @@ struct qed_dev {
         const struct firmware *firmware;
 };
 
-#define QED_GET_TYPE(dev)       (((dev)->type & QED_DEV_TYPE_MASK) >> \
-                                 QED_DEV_TYPE_SHIFT)
-#define QED_IS_BB_A0(dev)       (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
-#define QED_IS_BB(dev)          (QED_IS_BB_A0(dev))
-
 #define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
 #define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
@@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
         params.num_pf_cids = iids.cids;
         params.start_pq = qm_info->start_pq;
         params.num_pf_pqs = qm_info->num_pqs;
-        params.start_vport = qm_info->num_vports;
+        params.start_vport = qm_info->start_vport;
+        params.num_vports = qm_info->num_vports;
         params.pf_wfq = qm_info->pf_wfq;
         params.pf_rl = qm_info->pf_rl;
         params.pq_params = qm_info->qm_pq_params;
@@ -341,11 +341,6 @@ void qed_resc_setup(struct qed_dev *cdev)
         }
 }
 
-#define FINAL_CLEANUP_CMD_OFFSET        (0)
-#define FINAL_CLEANUP_CMD               (0x1)
-#define FINAL_CLEANUP_VALID_OFFSET      (6)
-#define FINAL_CLEANUP_VFPF_ID_SHIFT     (7)
-#define FINAL_CLEANUP_COMP              (0x2)
 #define FINAL_CLEANUP_POLL_CNT          (100)
 #define FINAL_CLEANUP_POLL_TIME         (10)
 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
@@ -355,12 +350,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
         u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
         int rc = -EBUSY;
 
-        addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+        addr = GTT_BAR0_MAP_REG_USDM_RAM +
+               USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
 
-        command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
-        command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
-        command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
-        command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+        command |= X_FINAL_CLEANUP_AGG_INT <<
+                   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+        command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+        command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+        command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
 
         /* Make sure notification is not set before initiating final cleanup */
         if (REG_RD(p_hwfn, addr)) {
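
The hunk above rebuilds the final-cleanup doorbell command from the generic SDM aggregate-interrupt fields of the new FW instead of private FINAL_CLEANUP_* defines. A minimal standalone sketch of this shift-based field packing (the field positions below are hypothetical, chosen only to illustrate the pattern, not the real SDM layout):

#include <stdint.h>

/* Hypothetical field positions, for illustration only. */
#define CMD_AGG_INT_INDEX_SHIFT 0
#define CMD_VECTOR_ENABLE_SHIFT 6
#define CMD_VECTOR_BIT_SHIFT    7
#define CMD_COMP_TYPE_SHIFT     16

static uint32_t build_cleanup_cmd(uint8_t agg_int, uint8_t vfpf_id,
                                  uint8_t comp_type)
{
        uint32_t command = 0;

        command |= (uint32_t)agg_int << CMD_AGG_INT_INDEX_SHIFT;
        command |= 1u << CMD_VECTOR_ENABLE_SHIFT;          /* raise the vector */
        command |= (uint32_t)vfpf_id << CMD_VECTOR_BIT_SHIFT;
        command |= (uint32_t)comp_type << CMD_COMP_TYPE_SHIFT;

        return command;
}
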
@@ -415,18 +412,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
         }
 
         switch (p_hwfn->cdev->mf_mode) {
-        case SF:
-                hw_mode |= 1 << MODE_SF;
+        case QED_MF_DEFAULT:
+        case QED_MF_NPAR:
+                hw_mode |= 1 << MODE_MF_SI;
                 break;
-        case MF_OVLAN:
+        case QED_MF_OVLAN:
                 hw_mode |= 1 << MODE_MF_SD;
                 break;
-        case MF_NPAR:
-                hw_mode |= 1 << MODE_MF_SI;
-                break;
         default:
-                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
-                hw_mode |= 1 << MODE_SF;
+                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+                hw_mode |= 1 << MODE_MF_SI;
         }
 
         hw_mode |= 1 << MODE_ASIC;
@@ -1018,8 +1013,7 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
         u32 *resc_num = p_hwfn->hw_info.resc_num;
         int num_funcs, i;
 
-        num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
-                                  : p_hwfn->cdev->num_ports_in_engines;
+        num_funcs = MAX_NUM_PFS_BB;
 
         resc_num[QED_SB] = min_t(u32,
                                  (MAX_SB_PER_PATH_BB / num_funcs),
@@ -1071,7 +1065,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
 {
         u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
-        u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+        u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
         struct qed_mcp_link_params *link;
 
         /* Read global nvm_cfg address */
@@ -1134,21 +1128,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                 break;
         }
 
-        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
-               offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
-               offsetof(struct nvm_cfg1_func, device_id);
-        val = qed_rd(p_hwfn, p_ptt, addr);
-
-        if (IS_MF(p_hwfn)) {
-                p_hwfn->hw_info.device_id =
-                        (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
-                        NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
-        } else {
-                p_hwfn->hw_info.device_id =
-                        (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
-                        NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
-        }
-
         /* Read default link configuration */
         link = &p_hwfn->mcp_info->link_input;
         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -1220,18 +1199,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
         switch (mf_mode) {
         case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
-                p_hwfn->cdev->mf_mode = MF_OVLAN;
+                p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
                 break;
         case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
-                p_hwfn->cdev->mf_mode = MF_NPAR;
+                p_hwfn->cdev->mf_mode = QED_MF_NPAR;
                 break;
-        case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
-                p_hwfn->cdev->mf_mode = SF;
+        case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+                p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
                 break;
         }
         DP_INFO(p_hwfn, "Multi function mode is %08x\n",
                 p_hwfn->cdev->mf_mode);
 
+        /* Read Multi-function information from shmem */
+        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+               offsetof(struct nvm_cfg1, glob) +
+               offsetof(struct nvm_cfg1_glob, device_capabilities);
+
+        device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
+        if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+                __set_bit(QED_DEV_CAP_ETH,
+                          &p_hwfn->hw_info.device_capabilities);
+
         return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
 }
@@ -1293,29 +1282,36 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
 static void qed_get_dev_info(struct qed_dev *cdev)
 {
+        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
         u32 tmp;
 
-        cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+        /* Read Vendor Id / Device Id */
+        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
+                             &cdev->vendor_id);
+        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
+                             &cdev->device_id);
+        cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                      MISCS_REG_CHIP_NUM);
-        cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+        cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                      MISCS_REG_CHIP_REV);
         MASK_FIELD(CHIP_REV, cdev->chip_rev);
 
+        cdev->type = QED_DEV_TYPE_BB;
         /* Learn number of HW-functions */
-        tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+        tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                      MISCS_REG_CMT_ENABLED_FOR_PAIR);
-        if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+        if (tmp & (1 << p_hwfn->rel_pf_id)) {
                 DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
                 cdev->num_hwfns = 2;
         } else {
                 cdev->num_hwfns = 1;
         }
 
-        cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+        cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_TEST_REG) >> 4;
         MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
-        cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+        cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                        MISCS_REG_CHIP_METAL);
         MASK_FIELD(CHIP_METAL, cdev->chip_metal);
@@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
  * Return -1 on error.
  */
 static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
-                              u8 start_vport,
                               u8 num_vports,
                               struct init_qm_vport_params *vport_params)
 {
-        u8 tc, i, vport_id;
         u32 inc_val;
+        u8 tc, i;
 
         /* go over all PF VPORTs */
-        for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-                u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
-                u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
-
+        for (i = 0; i < num_vports; i++) {
                 if (!vport_params[i].vport_wfq)
                         continue;
@@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
                  * different TCs
                  */
                 for (tc = 0; tc < NUM_OF_TCS; tc++) {
-                        u16 vport_pq_id = pq_ids[tc];
+                        u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
 
                         if (vport_pq_id != QM_INVALID_PQ_ID) {
                                 STORE_RT_REG(p_hwfn,
-                                             QM_REG_WFQVPWEIGHT_RT_OFFSET +
-                                             vport_pq_id, inc_val);
-                                STORE_RT_REG(p_hwfn, temp + vport_pq_id,
-                                             QM_WFQ_UPPER_BOUND |
-                                             QM_WFQ_CRD_REG_SIGN_BIT);
-                                STORE_RT_REG(p_hwfn,
                                              QM_REG_WFQVPCRD_RT_OFFSET +
                                              vport_pq_id,
                                              QM_WFQ_INIT_CRD(inc_val) |
                                              QM_WFQ_CRD_REG_SIGN_BIT);
+                                STORE_RT_REG(p_hwfn,
+                                             QM_REG_WFQVPWEIGHT_RT_OFFSET +
+                                             vport_pq_id, inc_val);
                         }
                 }
         }
@@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
         if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
                 return -1;
 
-        if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
-                               p_params->num_vports, vport_params))
+        if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
                 return -1;
 
         if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
@@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
         int i;
 
         for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
-                p_hwfn->rt_data[i].b_valid = false;
+                p_hwfn->rt_data.b_valid[i] = false;
 }
 
 void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
                            u32 rt_offset,
                            u32 val)
 {
-        p_hwfn->rt_data[rt_offset].init_val = val;
-        p_hwfn->rt_data[rt_offset].b_valid = true;
+        p_hwfn->rt_data.init_val[rt_offset] = val;
+        p_hwfn->rt_data.b_valid[rt_offset] = true;
 }
 
 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
-                           u32 rt_offset,
-                           u32 *val,
+                           u32 rt_offset, u32 *p_val,
                            size_t size)
 {
         size_t i;
 
         for (i = 0; i < size / sizeof(u32); i++) {
-                p_hwfn->rt_data[rt_offset + i].init_val = val[i];
-                p_hwfn->rt_data[rt_offset + i].b_valid = true;
+                p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+                p_hwfn->rt_data.b_valid[rt_offset + i] = true;
         }
 }
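
A runnable userspace mirror of what the aggregate store does against the new parallel arrays (names and sizes are made up; this is only a model of the staging step, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RT_ARRAY_SIZE 64 /* illustrative size */

static uint32_t rt_init_val[RT_ARRAY_SIZE];
static bool rt_b_valid[RT_ARRAY_SIZE];

/* Mirror of qed_init_store_rt_agg()'s loop: stage a block of values and
 * mark each slot valid so the flush stage knows what to program. */
static void store_rt_agg(uint32_t rt_offset, const uint32_t *p_val,
                         size_t size)
{
        for (size_t i = 0; i < size / sizeof(uint32_t); i++) {
                rt_init_val[rt_offset + i] = p_val[i];
                rt_b_valid[rt_offset + i] = true;
        }
}

int main(void)
{
        uint32_t vals[4] = { 1, 2, 3, 4 };

        store_rt_agg(8, vals, sizeof(vals));
        printf("slot 9 = %u (valid=%d)\n", rt_init_val[9], rt_b_valid[9]);
        return 0;
}
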
 
-static void qed_init_rt(struct qed_hwfn *p_hwfn,
-                        struct qed_ptt *p_ptt,
-                        u32 addr,
-                        u32 rt_offset,
-                        u32 size)
+static int qed_init_rt(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
+                       u32 addr,
+                       u16 rt_offset,
+                       u16 size,
+                       bool b_must_dmae)
 {
-        struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
-        u32 i;
+        u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+        bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+        u16 i, segment;
+        int rc = 0;
 
         /* Since not all RT entries are initialized, go over the RT and
          * for each segment of initialized values use DMA.
          */
         for (i = 0; i < size; i++) {
-                if (!rt_data[i].b_valid)
+                if (!p_valid[i])
                         continue;
-                qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+
+                /* In case there isn't any wide-bus configuration here,
+                 * simply write the data instead of using dmae.
+                 */
+                if (!b_must_dmae) {
+                        qed_wr(p_hwfn, p_ptt, addr + (i << 2),
+                               p_init_val[i]);
+                        continue;
+                }
+
+                /* Start of a new segment */
+                for (segment = 1; i + segment < size; segment++)
+                        if (!p_valid[i + segment])
+                                break;
+
+                rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+                                       (uintptr_t)(p_init_val + i),
+                                       addr + (i << 2), segment, 0);
+                if (rc != 0)
+                        return rc;
+
+                /* Jump over the entire segment, including invalid entry */
+                i += segment;
+        }
+
+        return rc;
 }
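
The new qed_init_rt() walks the valid-flags array and coalesces each run of consecutive valid entries into a single DMA transaction. A standalone sketch of just that run-finding logic (plain C, with a printing stub in place of the DMAE call):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub standing in for qed_dmae_host2grc(): one call per contiguous run. */
static void dma_block(const uint32_t *src, uint32_t dst, uint16_t count)
{
        printf("DMA %u words to 0x%x\n", count, dst);
}

static void flush_runtime(const uint32_t *vals, const bool *valid,
                          uint16_t size, uint32_t base_addr)
{
        for (uint16_t i = 0; i < size; i++) {
                uint16_t segment;

                if (!valid[i])
                        continue;

                /* Extend the segment while the following entries stay valid. */
                for (segment = 1; i + segment < size; segment++)
                        if (!valid[i + segment])
                                break;

                dma_block(vals + i, base_addr + (i << 2), segment);

                /* Skip past the segment and the invalid entry ending it. */
                i += segment;
        }
}

int main(void)
{
        uint32_t vals[8] = { 1, 2, 3, 0, 0, 6, 7, 8 };
        bool valid[8] = { true, true, true, false, false, true, true, true };

        flush_runtime(vals, valid, 8, 0x1000); /* issues exactly two DMAs */
        return 0;
}
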
 
 int qed_init_alloc(struct qed_hwfn *p_hwfn)
 {
-        struct qed_rt_data *rt_data;
+        struct qed_rt_data *rt_data = &p_hwfn->rt_data;
 
-        rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
-        if (!rt_data)
+        rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
+                                   GFP_KERNEL);
+        if (!rt_data->b_valid)
                 return -ENOMEM;
 
-        p_hwfn->rt_data = rt_data;
+        rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
+                                    GFP_KERNEL);
+        if (!rt_data->init_val) {
+                kfree(rt_data->b_valid);
+                return -ENOMEM;
+        }
 
         return 0;
 }
 
 void qed_init_free(struct qed_hwfn *p_hwfn)
 {
-        kfree(p_hwfn->rt_data);
-        p_hwfn->rt_data = NULL;
+        kfree(p_hwfn->rt_data.init_val);
+        kfree(p_hwfn->rt_data.b_valid);
 }
 
 static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
         case INIT_SRC_RUNTIME:
                 qed_init_rt(p_hwfn, p_ptt, addr,
                             le16_to_cpu(arg->runtime.offset),
-                            le16_to_cpu(arg->runtime.size));
+                            le16_to_cpu(arg->runtime.size),
+                            b_must_dmae);
                 break;
         }
@@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt,
                             struct init_read_op *cmd)
 {
-        u32 data = le32_to_cpu(cmd->op_data);
-        u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
-        bool (*comp_check)(u32 val,
-                           u32 expected_val);
-        u32 delay = QED_INIT_POLL_PERIOD_US, val;
+        bool (*comp_check)(u32 val, u32 expected_val);
+        u32 delay = QED_INIT_POLL_PERIOD_US, val;
+        u32 data, addr, poll;
+        int i;
+
+        data = le32_to_cpu(cmd->op_data);
+        addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+        poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
 
         val = qed_rd(p_hwfn, p_ptt, addr);
 
-        data = le32_to_cpu(cmd->op_data);
-        if (GET_FIELD(data, INIT_READ_OP_POLL)) {
-                int i;
+        if (poll == INIT_POLL_NONE)
+                return;
 
-                switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
-                case INIT_COMPARISON_EQ:
-                        comp_check = comp_eq;
-                        break;
-                case INIT_COMPARISON_OR:
-                        comp_check = comp_or;
-                        break;
-                case INIT_COMPARISON_AND:
-                        comp_check = comp_and;
-                        break;
-                default:
-                        comp_check = NULL;
-                        DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
-                               data);
-                        return;
-                }
+        switch (poll) {
+        case INIT_POLL_EQ:
+                comp_check = comp_eq;
+                break;
+        case INIT_POLL_OR:
+                comp_check = comp_or;
+                break;
+        case INIT_POLL_AND:
+                comp_check = comp_and;
+                break;
+        default:
+                DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+                       cmd->op_data);
+                return;
+        }
 
-                for (i = 0;
-                     i < QED_INIT_MAX_POLL_COUNT &&
-                     !comp_check(val, le32_to_cpu(cmd->expected_val));
-                     i++) {
-                        udelay(delay);
-                        val = qed_rd(p_hwfn, p_ptt, addr);
-                }
+        data = le32_to_cpu(cmd->expected_val);
+        for (i = 0;
+             i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+             i++) {
+                udelay(delay);
+                val = qed_rd(p_hwfn, p_ptt, addr);
+        }
 
-                if (i == QED_INIT_MAX_POLL_COUNT)
-                        DP_ERR(p_hwfn,
-                               "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
-                               addr, le32_to_cpu(cmd->expected_val),
-                               val, data);
-        }
+        if (i == QED_INIT_MAX_POLL_COUNT) {
+                DP_ERR(p_hwfn,
+                       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+                       addr, le32_to_cpu(cmd->expected_val),
+                       val, le32_to_cpu(cmd->op_data));
+        }
 }
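
The restructured reader resolves the poll type once, picks a comparator, then spins with a bounded retry count. A runnable userspace sketch of the same pattern (the register read is stubbed and the comparator semantics are illustrative approximations of comp_eq/comp_or/comp_and):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_POLL_COUNT 100

static bool comp_eq(uint32_t val, uint32_t expected) { return val == expected; }
static bool comp_and(uint32_t val, uint32_t expected) { return (val & expected) == expected; }
static bool comp_or(uint32_t val, uint32_t expected) { return (val | expected) > 0; }

/* Stub register read: pretend the value settles after a few polls. */
static uint32_t reg_rd(void) { static uint32_t v; return v++; }

static int poll_reg(bool (*comp_check)(uint32_t, uint32_t), uint32_t expected)
{
        uint32_t val = reg_rd();
        int i;

        for (i = 0; i < MAX_POLL_COUNT && !comp_check(val, expected); i++)
                val = reg_rd(); /* the driver also udelay()s between reads */

        return (i == MAX_POLL_COUNT) ? -1 : 0; /* -1 mirrors the timeout log */
}

int main(void)
{
        printf("poll result: %d\n", poll_reg(comp_eq, 5));
        return 0;
}
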
@@ -714,7 +714,6 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
         p_ramrod->sb_id = cpu_to_le16(p_params->sb);
         p_ramrod->sb_index = p_params->sb_idx;
         p_ramrod->stats_counter_id = stats_id;
-        p_ramrod->tc = p_pq_params->eth.tc;
 
         p_ramrod->pbl_size = cpu_to_le16(pbl_size);
         p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
@@ -821,9 +820,8 @@ qed_filter_action(enum qed_filter_opcode opcode)
         case QED_FILTER_REMOVE:
                 action = ETH_FILTER_ACTION_REMOVE;
                 break;
-        case QED_FILTER_REPLACE:
         case QED_FILTER_FLUSH:
-                action = ETH_FILTER_ACTION_REPLACE;
+                action = ETH_FILTER_ACTION_REMOVE_ALL;
                 break;
         default:
                 action = MAX_ETH_FILTER_ACTION;
@@ -892,8 +890,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
 
         switch (p_filter_cmd->opcode) {
-        case QED_FILTER_FLUSH:
-                p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+        case QED_FILTER_REPLACE:
         case QED_FILTER_MOVE:
                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
         default:
@@ -962,6 +959,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
                 p_second_filter->vport_id = vport_to_add_to;
+        } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
+                p_first_filter->vport_id = vport_to_add_to;
+                memcpy(p_second_filter, p_first_filter,
+                       sizeof(*p_second_filter));
+                p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+                p_second_filter->action = ETH_FILTER_ACTION_ADD;
         } else {
                 action = qed_filter_action(p_filter_cmd->opcode);
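
With the new FW, a unicast REPLACE is no longer a single firmware action: it is encoded as a two-command ramrod, REMOVE_ALL followed by ADD. A runnable sketch of that encoding pattern (the enum and struct below are illustrative stand-ins for the HSI filter types, not the real ones):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the HSI filter-action values. */
enum filter_action { ACT_REMOVE_ALL, ACT_ADD };

struct filter_cmd {
        enum filter_action action;
        int vport_id;
};

int main(void)
{
        struct filter_cmd cmds[2] = { { .action = ACT_ADD, .vport_id = 3 } };

        /* REPLACE == clone the filter, flush everything, then add it back. */
        memcpy(&cmds[1], &cmds[0], sizeof(cmds[1]));
        cmds[0].action = ACT_REMOVE_ALL;
        cmds[1].action = ACT_ADD;

        printf("cmd_cnt=2: [%d, %d]\n", cmds[0].action, cmds[1].action);
        return 0;
}
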
@@ -190,7 +190,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
         dev_info->pci_mem_start = cdev->pci_params.mem_start;
         dev_info->pci_mem_end = cdev->pci_params.mem_end;
         dev_info->pci_irq = cdev->pci_params.irq;
-        dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+        dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
         ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
 
         dev_info->fw_major = FW_MAJOR_VERSION;
@@ -720,26 +720,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
                 return -EINVAL;
         }
 
-        if (p_hwfn->cdev->mf_mode != SF) {
-                info->bandwidth_min = (shmem_info.config &
-                                       FUNC_MF_CFG_MIN_BW_MASK) >>
-                                      FUNC_MF_CFG_MIN_BW_SHIFT;
-                if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
-                        DP_INFO(p_hwfn,
-                                "bandwidth minimum out of bounds [%02x]. Set to 1\n",
-                                info->bandwidth_min);
-                        info->bandwidth_min = 1;
-                }
-
-                info->bandwidth_max = (shmem_info.config &
-                                       FUNC_MF_CFG_MAX_BW_MASK) >>
-                                      FUNC_MF_CFG_MAX_BW_SHIFT;
-                if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
-                        DP_INFO(p_hwfn,
-                                "bandwidth maximum out of bounds [%02x]. Set to 100\n",
-                                info->bandwidth_max);
-                        info->bandwidth_max = 100;
-                }
-        }
+        info->bandwidth_min = (shmem_info.config &
+                               FUNC_MF_CFG_MIN_BW_MASK) >>
+                              FUNC_MF_CFG_MIN_BW_SHIFT;
+        if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+                DP_INFO(p_hwfn,
+                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                        info->bandwidth_min);
+                info->bandwidth_min = 1;
+        }
+
+        info->bandwidth_max = (shmem_info.config &
+                               FUNC_MF_CFG_MAX_BW_MASK) >>
+                              FUNC_MF_CFG_MAX_BW_SHIFT;
+        if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+                DP_INFO(p_hwfn,
+                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                        info->bandwidth_max);
+                info->bandwidth_max = 100;
+        }
 
         if (shmem_info.mac_upper || shmem_info.mac_lower) {
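
The min/max checks above are ordinary range clamps plus an informational log. A runnable userspace mirror of just the clamping behaviour (names are made up; the driver's version differs only in the DP_INFO messages):

#include <stdio.h>

/* Userspace mirror of the driver's bounds check: values outside the
 * valid [1, 100] percent range are clamped to the nearest bound. */
static unsigned int clamp_bw(unsigned int bw)
{
        if (bw < 1)
                return 1;
        if (bw > 100)
                return 100;
        return bw;
}

int main(void)
{
        printf("%u %u %u\n", clamp_bw(0), clamp_bw(50), clamp_bw(250));
        return 0;
}
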
@@ -343,7 +343,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  */
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-                    enum mf_mode mode);
+                    enum qed_mf_mode mode);
 
 /**
  * @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -90,7 +90,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 }
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-                    enum mf_mode mode)
+                    enum qed_mf_mode mode)
 {
         struct qed_sp_init_request_params params;
         struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -125,6 +125,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
         p_ramrod->dont_log_ramrods = 0;
         p_ramrod->log_type_mask = cpu_to_le16(0xf);
-        p_ramrod->mf_mode = mode;
+        switch (mode) {
+        case QED_MF_DEFAULT:
+        case QED_MF_NPAR:
+                p_ramrod->mf_mode = MF_NPAR;
+                break;
+        case QED_MF_OVLAN:
+                p_ramrod->mf_mode = MF_OVLAN;
+                break;
+        default:
+                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+                p_ramrod->mf_mode = MF_NPAR;
+        }
         p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
 
 /* Place EQ address in RAMROD */
@@ -142,9 +154,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
         p_hwfn->hw_info.personality = PERSONALITY_ETH;
 
         DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                   "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
                    sb, sb_index,
-                   (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
                    p_ramrod->outer_tag);
 
         return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -173,9 +173,9 @@ enum QEDE_STATE {
  * skb are built only after the frame was DMA-ed.
  */
 struct sw_rx_data {
-        u8 *data;
-
-        DEFINE_DMA_UNMAP_ADDR(mapping);
+        struct page *data;
+        dma_addr_t mapping;
+        unsigned int page_offset;
 };
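
sw_rx_data now tracks a whole page plus an offset instead of a raw buffer pointer, which is what lets the RX path place multiple buffers (and the new HW ingress aggregations) within one page. A hedged sketch of how such a descriptor might be filled during ring refill; the helper name is hypothetical and error paths are trimmed:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical refill helper for the page-based descriptor above. */
static int example_alloc_rx_buffer(struct device *dev,
                                   struct sw_rx_data *rx_buf)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (unlikely(!page))
                return -ENOMEM;

        rx_buf->mapping = dma_map_page(dev, page, 0, PAGE_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, rx_buf->mapping))) {
                __free_page(page);
                return -ENOMEM;
        }

        rx_buf->data = page;
        rx_buf->page_offset = 0;
        return 0;
}
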
 struct qede_rx_queue {
@@ -188,6 +188,7 @@ struct qede_rx_queue {
         void __iomem *hw_rxq_prod_addr;
 
         int rx_buf_size;
+        unsigned int rx_buf_seg_size;
 
         u16 num_rx_buffers;
         u16 rxq_id;
@@ -281,6 +282,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev);
 #define NUM_TX_BDS_MIN 128
 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
 
+#define QEDE_RX_HDR_SIZE 256
 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
 
 #endif /* _QEDE_H_ */
@@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         struct qed_link_params params;
         u32 speed;
 
-        if (edev->dev_info.common.is_mf) {
+        if (!edev->dev_info.common.is_mf_default) {
                 DP_INFO(edev,
-                        "Link parameters can not be changed in MF mode\n");
+                        "Link parameters can not be changed in non-default mode\n");
                 return -EOPNOTSUPP;
         }
@@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev,
         struct qed_link_params params;
         struct qed_link_output current_link;
 
-        if (!edev->dev_info.common.is_mf) {
+        if (!edev->dev_info.common.is_mf_default) {
                 DP_INFO(edev,
                         "Pause parameters can not be updated in non-default mode\n");
                 return -EOPNOTSUPP;
@@ -11,9 +11,11 @@
 
 #define CORE_SPQE_PAGE_SIZE_BYTES 4096
 
+#define X_FINAL_CLEANUP_AGG_INT 1
+
 #define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 4
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 7
+#define FW_REVISION_VERSION 3
 #define FW_ENGINEERING_VERSION 0
 
 /***********************/
@@ -152,6 +154,9 @@
 /* number of queues in a PF queue group */
 #define QM_PF_QUEUE_GROUP_SIZE 8
 
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
+
 /* base number of Tx PQs in the CM PQ representation.
  * should be used when storing PQ IDs in CM PQ registers and context
  */
@@ -285,6 +290,16 @@
 #define PXP_NUM_ILT_RECORDS_K2 11000
 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
 
+#define SDM_COMP_TYPE_NONE              0
+#define SDM_COMP_TYPE_WAKE_THREAD       1
+#define SDM_COMP_TYPE_AGG_INT           2
+#define SDM_COMP_TYPE_CM                3
+#define SDM_COMP_TYPE_LOADER            4
+#define SDM_COMP_TYPE_PXP               5
+#define SDM_COMP_TYPE_INDICATE_ERROR    6
+#define SDM_COMP_TYPE_RELEASE_THREAD    7
+#define SDM_COMP_TYPE_RAM               8
+
 /******************/
 /* PBF CONSTANTS  */
 /******************/
@@ -335,7 +350,7 @@ struct event_ring_entry {
 
 /* Multi function mode */
 enum mf_mode {
-        SF,
+        ERROR_MODE /* Unsupported mode */,
         MF_OVLAN,
         MF_NPAR,
         MAX_MF_MODE
@@ -606,4 +621,19 @@ struct status_block {
 #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
 };
 
+struct tunnel_parsing_flags {
+        u8 flags;
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK              0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT             0
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK  0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK     0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT    3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK   0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT  5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK     0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT    6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK      0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT     7
+};
+
 #endif /* __COMMON_HSI__ */
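
Each sub-field of the new flags byte is described by a MASK/SHIFT pair, the usual HSI convention, so fields are pulled out with a GET_FIELD()-style macro. A runnable extraction sketch (the macro mirrors the pattern used throughout qed; the sample byte is made up):

#include <stdint.h>
#include <stdio.h>

/* Same mask/shift pairs as the struct above. */
#define TUNNEL_PARSING_FLAGS_TYPE_MASK           0x3
#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT          0
#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK  0x3
#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3

/* Mirrors the qed GET_FIELD() convention: shift down, then mask. */
#define GET_FIELD(value, name) \
        (((value) >> name##_SHIFT) & name##_MASK)

int main(void)
{
        uint8_t flags = 0x19; /* made-up sample: 0b00011001 */

        printf("type=%u next_proto=%u\n",
               GET_FIELD(flags, TUNNEL_PARSING_FLAGS_TYPE),
               GET_FIELD(flags, TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL));
        return 0;
}
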
@@ -80,7 +80,7 @@ struct qed_dev_info {
         u8 num_hwfns;
 
         u8 hw_mac[ETH_ALEN];
-        bool is_mf;
+        bool is_mf_default;
 
         /* FW version */
         u16 fw_major;
@@ -360,6 +360,12 @@ enum DP_MODULE {
         /* to be added...up to 0x8000000 */
 };
 
+enum qed_mf_mode {
+        QED_MF_DEFAULT,
+        QED_MF_OVLAN,
+        QED_MF_NPAR,
+};
+
 struct qed_eth_stats {
         u64 no_buff_discards;
         u64 packet_too_big_discard;