Commit 0162a583 authored by David S. Miller

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed*: Driver updates

This contains various minor changes to driver - changing memory allocation,
fixing a small theoretical bug, as well as some mostly-semantic changes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ea5b2f44 d4ee5289
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#include "qed_hsi.h" #include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass; extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.4.0.0" #define DRV_MODULE_VERSION "8.7.0.0"
#define MAX_HWFNS_PER_DEVICE (4) #define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16 #define NAME_SIZE 16
......
...@@ -448,7 +448,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) ...@@ -448,7 +448,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr; struct qed_cxt_mngr *p_mngr;
u32 i; u32 i;
p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC); p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
if (!p_mngr) { if (!p_mngr) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n"); DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
return -ENOMEM; return -ENOMEM;
......
...@@ -134,17 +134,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -134,17 +134,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
*/ */
qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
num_pqs, GFP_ATOMIC); num_pqs, GFP_KERNEL);
if (!qm_info->qm_pq_params) if (!qm_info->qm_pq_params)
goto alloc_err; goto alloc_err;
qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
num_vports, GFP_ATOMIC); num_vports, GFP_KERNEL);
if (!qm_info->qm_vport_params) if (!qm_info->qm_vport_params)
goto alloc_err; goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
MAX_NUM_PORTS, GFP_ATOMIC); MAX_NUM_PORTS, GFP_KERNEL);
if (!qm_info->qm_port_params) if (!qm_info->qm_port_params)
goto alloc_err; goto alloc_err;
......
...@@ -44,7 +44,7 @@ struct qed_ptt_pool { ...@@ -44,7 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{ {
struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
GFP_ATOMIC); GFP_KERNEL);
int i; int i;
if (!p_pool) if (!p_pool)
......
...@@ -399,7 +399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, ...@@ -399,7 +399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
dma_addr_t p_phys = 0; dma_addr_t p_phys = 0;
/* SB struct */ /* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
if (!p_sb) { if (!p_sb) {
DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n"); DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
return -ENOMEM; return -ENOMEM;
...@@ -473,20 +473,20 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, ...@@ -473,20 +473,20 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid) u8 vf_valid)
{ {
struct cau_sb_entry sb_entry; struct cau_sb_entry sb_entry;
u32 val;
qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
vf_number, vf_valid); vf_number, vf_valid);
if (p_hwfn->hw_init_done) { if (p_hwfn->hw_init_done) {
val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64); /* Wide-bus, initialize via DMAE */
qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys)); u64 phys_addr = (u64)sb_phys;
qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
upper_32_bits(sb_phys)); qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64); igu_sb_id * sizeof(u64), 2, 0);
qed_wr(p_hwfn, p_ptt, val, sb_entry.data); qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params); CAU_REG_SB_VAR_MEMORY +
igu_sb_id * sizeof(u64), 2, 0);
} else { } else {
/* Initialize Status Block Address */ /* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn, STORE_RT_REG_AGG(p_hwfn,
...@@ -666,7 +666,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, ...@@ -666,7 +666,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
void *p_virt; void *p_virt;
/* SB struct */ /* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
if (!p_sb) { if (!p_sb) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n"); DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
return -ENOMEM; return -ENOMEM;
...@@ -946,7 +946,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -946,7 +946,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
u16 sb_id; u16 sb_id;
u16 prev_sb_id = 0xFF; u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC); p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
if (!p_hwfn->hw_info.p_igu_info) if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM; return -ENOMEM;
...@@ -1072,7 +1072,7 @@ static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) ...@@ -1072,7 +1072,7 @@ static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{ {
p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC); p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
if (!p_hwfn->sp_dpc) if (!p_hwfn->sp_dpc)
return -ENOMEM; return -ENOMEM;
......
...@@ -142,9 +142,9 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, ...@@ -142,9 +142,9 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
u8 drop_ttl0_flg, u8 drop_ttl0_flg,
u8 inner_vlan_removal_en_flg) u8 inner_vlan_removal_en_flg)
{ {
struct qed_sp_init_request_params params;
struct vport_start_ramrod_data *p_ramrod = NULL; struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL; int rc = -EINVAL;
u16 rx_mode = 0; u16 rx_mode = 0;
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
...@@ -153,16 +153,14 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, ...@@ -153,16 +153,14 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
if (rc != 0) if (rc != 0)
return rc; return rc;
memset(&params, 0, sizeof(params)); memset(&init_data, 0, sizeof(init_data));
params.ramrod_data_size = sizeof(*p_ramrod); init_data.cid = qed_spq_get_cid(p_hwfn);
params.comp_mode = QED_SPQ_MODE_EBLOCK; init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
qed_spq_get_cid(p_hwfn),
opaque_fid,
ETH_RAMROD_VPORT_START, ETH_RAMROD_VPORT_START,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&params);
if (rc) if (rc)
return rc; return rc;
...@@ -362,7 +360,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, ...@@ -362,7 +360,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
{ {
struct qed_rss_params *p_rss_params = p_params->rss_params; struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn; struct vport_update_ramrod_data_cmn *p_cmn;
struct qed_sp_init_request_params sp_params; struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL; struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
...@@ -372,17 +370,15 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, ...@@ -372,17 +370,15 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
if (rc != 0) if (rc != 0)
return rc; return rc;
memset(&sp_params, 0, sizeof(sp_params)); memset(&init_data, 0, sizeof(init_data));
sp_params.ramrod_data_size = sizeof(*p_ramrod); init_data.cid = qed_spq_get_cid(p_hwfn);
sp_params.comp_mode = comp_mode; init_data.opaque_fid = p_params->opaque_fid;
sp_params.p_comp_data = p_comp_data; init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
qed_spq_get_cid(p_hwfn),
p_params->opaque_fid,
ETH_RAMROD_VPORT_UPDATE, ETH_RAMROD_VPORT_UPDATE,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -416,8 +412,8 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, ...@@ -416,8 +412,8 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u16 opaque_fid, u16 opaque_fid,
u8 vport_id) u8 vport_id)
{ {
struct qed_sp_init_request_params sp_params;
struct vport_stop_ramrod_data *p_ramrod; struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent; struct qed_spq_entry *p_ent;
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
int rc; int rc;
...@@ -426,16 +422,14 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, ...@@ -426,16 +422,14 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
if (rc != 0) if (rc != 0)
return rc; return rc;
memset(&sp_params, 0, sizeof(sp_params)); memset(&init_data, 0, sizeof(init_data));
sp_params.ramrod_data_size = sizeof(*p_ramrod); init_data.cid = qed_spq_get_cid(p_hwfn);
sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
qed_spq_get_cid(p_hwfn),
opaque_fid,
ETH_RAMROD_VPORT_STOP, ETH_RAMROD_VPORT_STOP,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -514,8 +508,8 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, ...@@ -514,8 +508,8 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size) u16 cqe_pbl_size)
{ {
struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_rx_cid; struct qed_hw_cid_data *p_rx_cid;
u16 abs_rx_q_id = 0; u16 abs_rx_q_id = 0;
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
...@@ -540,15 +534,15 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, ...@@ -540,15 +534,15 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
opaque_fid, cid, params->queue_id, params->vport_id, opaque_fid, cid, params->queue_id, params->vport_id,
params->sb); params->sb);
memset(&sp_params, 0, sizeof(params)); /* Get SPQ entry */
sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; memset(&init_data, 0, sizeof(init_data));
sp_params.ramrod_data_size = sizeof(*p_ramrod); init_data.cid = cid;
init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
cid, opaque_fid,
ETH_RAMROD_RX_QUEUE_START, ETH_RAMROD_RX_QUEUE_START,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -563,12 +557,10 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, ...@@ -563,12 +557,10 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->complete_event_flg = 1; p_ramrod->complete_event_flg = 1;
p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr); DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
rc = qed_spq_post(p_hwfn, p_ent, NULL); rc = qed_spq_post(p_hwfn, p_ent, NULL);
...@@ -640,21 +632,20 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, ...@@ -640,21 +632,20 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
{ {
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL; struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0; u16 abs_rx_q_id = 0;
int rc = -EINVAL; int rc = -EINVAL;
memset(&sp_params, 0, sizeof(sp_params)); /* Get SPQ entry */
sp_params.ramrod_data_size = sizeof(*p_ramrod); memset(&init_data, 0, sizeof(init_data));
sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; init_data.cid = p_rx_cid->cid;
init_data.opaque_fid = p_rx_cid->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
p_rx_cid->cid,
p_rx_cid->opaque_fid,
ETH_RAMROD_RX_QUEUE_STOP, ETH_RAMROD_RX_QUEUE_STOP,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -692,8 +683,8 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, ...@@ -692,8 +683,8 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params *p_pq_params) union qed_qm_pq_params *p_pq_params)
{ {
struct tx_queue_start_ramrod_data *p_ramrod = NULL; struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_tx_cid; struct qed_hw_cid_data *p_tx_cid;
u8 abs_vport_id; u8 abs_vport_id;
int rc = -EINVAL; int rc = -EINVAL;
...@@ -708,15 +699,15 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, ...@@ -708,15 +699,15 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc) if (rc)
return rc; return rc;
memset(&sp_params, 0, sizeof(sp_params)); /* Get SPQ entry */
sp_params.ramrod_data_size = sizeof(*p_ramrod); memset(&init_data, 0, sizeof(init_data));
sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; init_data.cid = cid;
init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, cid, rc = qed_sp_init_request(p_hwfn, &p_ent,
opaque_fid,
ETH_RAMROD_TX_QUEUE_START, ETH_RAMROD_TX_QUEUE_START,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -728,8 +719,7 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, ...@@ -728,8 +719,7 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->stats_counter_id = stats_id; p_ramrod->stats_counter_id = stats_id;
p_ramrod->pbl_size = cpu_to_le16(pbl_size); p_ramrod->pbl_size = cpu_to_le16(pbl_size);
p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
pq_id = qed_get_qm_pq(p_hwfn, pq_id = qed_get_qm_pq(p_hwfn,
PROTOCOLID_ETH, PROTOCOLID_ETH,
...@@ -796,20 +786,19 @@ static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, ...@@ -796,20 +786,19 @@ static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
u16 tx_queue_id) u16 tx_queue_id)
{ {
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL; int rc = -EINVAL;
memset(&sp_params, 0, sizeof(sp_params)); /* Get SPQ entry */
sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data); memset(&init_data, 0, sizeof(init_data));
sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; init_data.cid = p_tx_cid->cid;
init_data.opaque_fid = p_tx_cid->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
p_tx_cid->cid,
p_tx_cid->opaque_fid,
ETH_RAMROD_TX_QUEUE_STOP, ETH_RAMROD_TX_QUEUE_STOP,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -866,9 +855,9 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, ...@@ -866,9 +855,9 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
{ {
u8 vport_to_add_to = 0, vport_to_remove_from = 0; u8 vport_to_add_to = 0, vport_to_remove_from = 0;
struct vport_filter_update_ramrod_data *p_ramrod; struct vport_filter_update_ramrod_data *p_ramrod;
struct qed_sp_init_request_params sp_params;
struct eth_filter_cmd *p_first_filter; struct eth_filter_cmd *p_first_filter;
struct eth_filter_cmd *p_second_filter; struct eth_filter_cmd *p_second_filter;
struct qed_sp_init_data init_data;
enum eth_filter_action action; enum eth_filter_action action;
int rc; int rc;
...@@ -882,17 +871,16 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, ...@@ -882,17 +871,16 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
if (rc) if (rc)
return rc; return rc;
memset(&sp_params, 0, sizeof(sp_params)); /* Get SPQ entry */
sp_params.ramrod_data_size = sizeof(**pp_ramrod); memset(&init_data, 0, sizeof(init_data));
sp_params.comp_mode = comp_mode; init_data.cid = qed_spq_get_cid(p_hwfn);
sp_params.p_comp_data = p_comp_data; init_data.opaque_fid = opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, pp_ent, rc = qed_sp_init_request(p_hwfn, pp_ent,
qed_spq_get_cid(p_hwfn),
opaque_fid,
ETH_RAMROD_FILTERS_UPDATE, ETH_RAMROD_FILTERS_UPDATE,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) if (rc)
return rc; return rc;
...@@ -1116,8 +1104,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, ...@@ -1116,8 +1104,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
{ {
unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL; struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
int rc, i; int rc, i;
...@@ -1133,18 +1121,16 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, ...@@ -1133,18 +1121,16 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
memset(&sp_params, 0, sizeof(sp_params)); /* Get SPQ entry */
sp_params.ramrod_data_size = sizeof(*p_ramrod); memset(&init_data, 0, sizeof(init_data));
sp_params.comp_mode = comp_mode; init_data.cid = qed_spq_get_cid(p_hwfn);
sp_params.p_comp_data = p_comp_data; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
qed_spq_get_cid(p_hwfn),
p_hwfn->hw_info.opaque_fid,
ETH_RAMROD_VPORT_UPDATE, ETH_RAMROD_VPORT_UPDATE,
PROTOCOLID_ETH, PROTOCOLID_ETH, &init_data);
&sp_params);
if (rc) { if (rc) {
DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
return rc; return rc;
......
...@@ -147,7 +147,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, ...@@ -147,7 +147,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
u32 size; u32 size;
/* Allocate mcp_info structure */ /* Allocate mcp_info structure */
p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC); p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
if (!p_hwfn->mcp_info) if (!p_hwfn->mcp_info)
goto err; goto err;
p_info = p_hwfn->mcp_info; p_info = p_hwfn->mcp_info;
...@@ -161,10 +161,10 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, ...@@ -161,10 +161,10 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
} }
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC); p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow = p_info->mfw_mb_shadow =
kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
p_info->mfw_mb_length), GFP_ATOMIC); p_info->mfw_mb_length), GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err; goto err;
......
...@@ -311,19 +311,20 @@ void qed_consq_free(struct qed_hwfn *p_hwfn, ...@@ -311,19 +311,20 @@ void qed_consq_free(struct qed_hwfn *p_hwfn,
#define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02 #define QED_SP_CQE_COMPLETION 0x02
struct qed_sp_init_request_params { struct qed_sp_init_data {
size_t ramrod_data_size; u32 cid;
u16 opaque_fid;
/* Information regarding operation upon sending & completion */
enum spq_mode comp_mode; enum spq_mode comp_mode;
struct qed_spq_comp_cb *p_comp_data; struct qed_spq_comp_cb *p_comp_data;
}; };
int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent, struct qed_spq_entry **pp_ent,
u32 cid,
u16 opaque_fid,
u8 cmd, u8 cmd,
u8 protocol, u8 protocol,
struct qed_sp_init_request_params *p_params); struct qed_sp_init_data *p_data);
/** /**
* @brief qed_sp_pf_start - PF Function Start Ramrod * @brief qed_sp_pf_start - PF Function Start Ramrod
......
...@@ -23,15 +23,13 @@ ...@@ -23,15 +23,13 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent, struct qed_spq_entry **pp_ent,
u32 cid,
u16 opaque_fid,
u8 cmd, u8 cmd,
u8 protocol, u8 protocol,
struct qed_sp_init_request_params *p_params) struct qed_sp_init_data *p_data)
{ {
int rc = -EINVAL; u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
u32 opaque_cid = opaque_fid << 16 | cid; int rc;
if (!pp_ent) if (!pp_ent)
return -ENOMEM; return -ENOMEM;
...@@ -48,7 +46,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -48,7 +46,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
p_ent->elem.hdr.protocol_id = protocol; p_ent->elem.hdr.protocol_id = protocol;
p_ent->priority = QED_SPQ_PRIORITY_NORMAL; p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
p_ent->comp_mode = p_params->comp_mode; p_ent->comp_mode = p_data->comp_mode;
p_ent->comp_done.done = 0; p_ent->comp_done.done = 0;
switch (p_ent->comp_mode) { switch (p_ent->comp_mode) {
...@@ -57,17 +55,17 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -57,17 +55,17 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
break; break;
case QED_SPQ_MODE_BLOCK: case QED_SPQ_MODE_BLOCK:
if (!p_params->p_comp_data) if (!p_data->p_comp_data)
return -EINVAL; return -EINVAL;
p_ent->comp_cb.cookie = p_params->p_comp_data->cookie; p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break; break;
case QED_SPQ_MODE_CB: case QED_SPQ_MODE_CB:
if (!p_params->p_comp_data) if (!p_data->p_comp_data)
p_ent->comp_cb.function = NULL; p_ent->comp_cb.function = NULL;
else else
p_ent->comp_cb = *p_params->p_comp_data; p_ent->comp_cb = *p_data->p_comp_data;
break; break;
default: default:
...@@ -83,8 +81,8 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -83,8 +81,8 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB")); "MODE_CB"));
if (p_params->ramrod_data_size)
memset(&p_ent->ramrod, 0, p_params->ramrod_data_size); memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0; return 0;
} }
...@@ -92,28 +90,26 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -92,28 +90,26 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
enum qed_mf_mode mode) enum qed_mf_mode mode)
{ {
struct qed_sp_init_request_params params;
struct pf_start_ramrod_data *p_ramrod = NULL; struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn); u16 sb = qed_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index; u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL; int rc = -EINVAL;
/* update initial eq producer */ /* update initial eq producer */
qed_eq_prod_update(p_hwfn, qed_eq_prod_update(p_hwfn,
qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
memset(&params, 0, sizeof(params)); memset(&init_data, 0, sizeof(init_data));
params.ramrod_data_size = sizeof(*p_ramrod); init_data.cid = qed_spq_get_cid(p_hwfn);
params.comp_mode = QED_SPQ_MODE_EBLOCK; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, rc = qed_sp_init_request(p_hwfn, &p_ent,
&p_ent,
qed_spq_get_cid(p_hwfn),
p_hwfn->hw_info.opaque_fid,
COMMON_RAMROD_PF_START, COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON, PROTOCOLID_COMMON,
&params); &init_data);
if (rc) if (rc)
return rc; return rc;
...@@ -140,16 +136,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -140,16 +136,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */ /* Place EQ address in RAMROD */
p_ramrod->event_ring_pbl_addr.hi = DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); p_hwfn->p_eq->chain.pbl.p_phys_table);
p_ramrod->event_ring_pbl_addr.lo =
DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
p_ramrod->consolid_q_pbl_addr.hi = DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); p_hwfn->p_consq->chain.pbl.p_phys_table);
p_ramrod->consolid_q_pbl_addr.lo =
DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
p_hwfn->hw_info.personality = PERSONALITY_ETH; p_hwfn->hw_info.personality = PERSONALITY_ETH;
...@@ -163,17 +155,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -163,17 +155,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{ {
struct qed_sp_init_request_params params;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL; int rc = -EINVAL;
memset(&params, 0, sizeof(params)); /* Get SPQ entry */
params.comp_mode = QED_SPQ_MODE_EBLOCK; memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn), rc = qed_sp_init_request(p_hwfn, &p_ent,
p_hwfn->hw_info.opaque_fid,
COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
&params); &init_data);
if (rc) if (rc)
return rc; return rc;
......
...@@ -183,10 +183,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, ...@@ -183,10 +183,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt->xstorm_st_context.spq_base_hi = p_cxt->xstorm_st_context.spq_base_hi =
DMA_HI_LE(p_spq->chain.p_phys_addr); DMA_HI_LE(p_spq->chain.p_phys_addr);
p_cxt->xstorm_st_context.consolid_base_addr.lo = DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr); p_hwfn->p_consq->chain.p_phys_addr);
p_cxt->xstorm_st_context.consolid_base_addr.hi =
DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
} }
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
...@@ -327,7 +325,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, ...@@ -327,7 +325,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq; struct qed_eq *p_eq;
/* Allocate EQ struct */ /* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC); p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq) { if (!p_eq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n"); DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
return NULL; return NULL;
...@@ -423,8 +421,7 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) ...@@ -423,8 +421,7 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_virt = p_spq->p_virt; p_virt = p_spq->p_virt;
for (i = 0; i < p_spq->chain.capacity; i++) { for (i = 0; i < p_spq->chain.capacity; i++) {
p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys); DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool); list_add_tail(&p_virt->list, &p_spq->free_pool);
...@@ -457,7 +454,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) ...@@ -457,7 +454,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
/* SPQ struct */ /* SPQ struct */
p_spq = p_spq =
kzalloc(sizeof(struct qed_spq), GFP_ATOMIC); kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
if (!p_spq) { if (!p_spq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
return -ENOMEM; return -ENOMEM;
...@@ -853,7 +850,7 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) ...@@ -853,7 +850,7 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
struct qed_consq *p_consq; struct qed_consq *p_consq;
/* Allocate ConsQ struct */ /* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC); p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq) { if (!p_consq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n"); DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
return NULL; return NULL;
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <linux/qed/qed_eth_if.h> #include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8 #define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 4 #define QEDE_MINOR_VERSION 7
#define QEDE_REVISION_VERSION 0 #define QEDE_REVISION_VERSION 0
#define QEDE_ENGINEERING_VERSION 0 #define QEDE_ENGINEERING_VERSION 0
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
......
...@@ -19,6 +19,10 @@ ...@@ -19,6 +19,10 @@
/* dma_addr_t manip */ /* dma_addr_t manip */
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val) do { \
(x).hi = DMA_HI_LE((val)); \
(x).lo = DMA_LO_LE((val)); \
} while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) #define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment