Commit a2956ecd authored by David S. Miller

Merge branch 'qed-janitorial'

Yuval Mintz says:

====================
qed*: Janitorial series [semantic & prints]

Some day-one coding style slips exist in the qed* code
[incorrect alignments, conditions using (== 0), etc.].
This series addresses those and makes some additional
cosmetic changes along the way [such as reducing the number of lines
needed for function declarations].

The series is broken into 3 parts - purely semantic changes, cosmetic
changes that required minor changes in the code, and print-related
changes. All in all, no real change in driver behavior is expected.

[This is a repost; the original was sent while net-next was closed].

Please consider applying this to `net-next'.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 54be3d98 525ef5c0
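The bulk of the series is mechanical conversions of the kinds visible in the hunks below: dropping redundant == 0 / != 0 comparisons on return codes, replacing open-coded (1 << n) shifts with the BIT() macro, and collapsing function declarations onto fewer lines. A minimal standalone sketch of the first two conversions follows; BIT() is defined locally so the snippet compiles outside the kernel, and do_work() is a hypothetical stand-in for a qed_* call returning an error code:

    #include <stdio.h>

    /* Local stand-in for the kernel's BIT() macro so this builds in userspace. */
    #define BIT(n) (1UL << (n))

    /* Hypothetical helper standing in for a qed_* call; returns 0 on success. */
    static int do_work(void)
    {
            return 0;
    }

    int main(void)
    {
            unsigned long active_seg_mask = 0;
            int rc;

            rc = do_work();
            if (rc)                         /* equivalent to the older "if (rc != 0)" */
                    return 1;

            active_seg_mask |= BIT(3);      /* equivalent to "(1 << 3)" */
            printf("mask = 0x%lx\n", active_seg_mask);

            return 0;
    }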
......@@ -377,9 +377,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
}
}
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *vf_cid)
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
......@@ -405,10 +404,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
return cnt;
}
static void
qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
u8 seg, u8 seg_type, u32 count, bool has_fl)
static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
u8 seg,
u8 seg_type, u32 count, bool has_fl)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
......@@ -420,8 +419,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
u32 start_line, u32 total_size,
u32 elem_size)
u32 start_line, u32 total_size, u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
......@@ -448,8 +446,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_cli->first.val = *p_line;
p_cli->active = true;
*p_line += DIV_ROUND_UP(p_blk->total_size,
p_blk->real_size_in_page);
*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
......@@ -926,12 +923,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
u32 size;
size = min_t(u32, sz_left,
p_blk->real_size_in_page);
size = min_t(u32, sz_left, p_blk->real_size_in_page);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
size,
&p_phys,
GFP_KERNEL);
size, &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
memset(p_virt, 0, size);
......@@ -976,7 +970,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
if (rc != 0)
if (rc)
goto ilt_shadow_fail;
}
for (k = 0; k < p_mngr->vf_count; k++) {
......@@ -985,7 +979,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
if (rc != 0)
if (rc)
goto ilt_shadow_fail;
}
}
......@@ -1672,7 +1666,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
tm_offset += tm_iids.pf_tids[i];
}
......@@ -1702,8 +1696,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid)
enum protocol_type type, u32 *p_cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 rel_cid;
......@@ -1717,8 +1710,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
type);
DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
......@@ -1730,8 +1722,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
u32 cid,
enum protocol_type *p_type)
u32 cid, enum protocol_type *p_type)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
......@@ -1763,8 +1754,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
return true;
}
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid)
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
enum protocol_type type;
......@@ -1781,8 +1771,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info)
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
......
......@@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
p_hwfn->rt_data.b_valid[i] = false;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 val)
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset, u32 *p_val,
size_t size)
u32 rt_offset, u32 *p_val, size_t size)
{
size_t i;
......@@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
static int qed_init_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u16 rt_offset,
u16 size,
bool b_must_dmae)
u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
......@@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2),
p_init_val[i]);
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
continue;
}
......@@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
if (rc != 0)
if (rc)
return rc;
/* Jump over the entire segment, including invalid entry */
......@@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u32 fill,
u32 fill_count)
u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
......@@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count,
QED_DMAE_FLAG_RW_REPL_SRC);
addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u32 fill,
u32 fill_count)
u32 addr, u32 fill, u32 fill_count)
{
u32 i;
......@@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
bool b_must_dmae,
bool b_can_dmae)
bool b_must_dmae, bool b_can_dmae)
{
u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 offset, output_len, input_len, max_size;
struct qed_dev *cdev = p_hwfn->cdev;
union init_array_hdr *hdr;
......@@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
array_data = cdev->fw_data->arr_data;
hdr = (union init_array_hdr *)(array_data +
dmae_array_offset);
hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
data = le32_to_cpu(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
......@@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
bool b_can_dmae)
struct init_write_op *p_cmd, bool b_can_dmae)
{
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
u32 data = le32_to_cpu(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
union init_write_args *arg = &cmd->args;
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
union init_write_args *arg = &p_cmd->args;
int rc = 0;
/* Sanitize */
......@@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
qed_wr(p_hwfn, p_ptt, addr,
le32_to_cpu(arg->inline_val));
data = le32_to_cpu(p_cmd->args.inline_val);
qed_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
if (b_must_dmae ||
(b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
le32_to_cpu(arg->zeros_count));
data = le32_to_cpu(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
else
qed_init_fill(p_hwfn, p_ptt, addr, 0,
le32_to_cpu(arg->zeros_count));
qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
......@@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_read_op *cmd)
struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = QED_INIT_POLL_PERIOD_US, val;
......@@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
u16 *offset,
int modes)
u16 *p_offset, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = cdev->fw_data->modes_tree_buf;
tree_val = modes_tree_buf[(*offset)++];
tree_val = modes_tree_buf[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
return (modes & (1 << tree_val)) ? 1 : 0;
return (modes & BIT(tree_val)) ? 1 : 0;
}
}
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
struct init_if_mode_op *p_cmd,
int modes)
struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
......@@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
u32 phase,
u32 phase_id)
u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
u32 op_data = le32_to_cpu(p_cmd->op_data);
......@@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
}
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
int phase,
int phase_id,
int modes)
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
......
......@@ -51,8 +51,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
pr_notice("qed_init called\n");
pr_info("%s", version);
return 0;
......@@ -106,8 +104,7 @@ static void qed_free_pci(struct qed_dev *cdev)
/* Performs PCI initializations as well as initializing PCI-related parameters
* in the device structure. Returns 0 in case of success.
*/
static int qed_init_pci(struct qed_dev *cdev,
struct pci_dev *pdev)
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
u8 rev_id;
int rc;
......@@ -263,8 +260,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev,
pci_power_t state)
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
if (!cdev)
return -ENODEV;
......@@ -366,8 +362,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
DP_NOTICE(cdev,
"Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
rc = pci_enable_msix_exact(cdev->pdev,
int_params->msix_table, cnt);
rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
cnt);
if (!rc)
rc = cnt;
}
......@@ -439,6 +435,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
}
out:
if (!rc)
DP_INFO(cdev, "Using %s interrupts\n",
int_params->out.int_mode == QED_INT_MODE_INTA ?
"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
"MSI" : "MSIX");
cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
return rc;
......@@ -514,19 +515,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
struct qed_dev *cdev = hwfn->cdev;
u32 int_mode;
int rc = 0;
u8 id;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
int_mode = cdev->int_params.out.int_mode;
if (int_mode == QED_INT_MODE_MSIX) {
id = hwfn->my_id;
snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
if (!rc)
DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
"Requested slowpath MSI-X\n");
} else {
unsigned long flags = 0;
......@@ -541,6 +541,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
flags, cdev->name, cdev);
}
if (rc)
DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
else
DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
"Requested slowpath %s\n",
(int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
return rc;
}
......@@ -974,8 +981,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
struct qed_sb_info *sb_info,
u16 sb_id)
struct qed_sb_info *sb_info, u16 sb_id)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
......
......@@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PORT);
......@@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
u32 tmp, i;
......@@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
......@@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
return 0;
}
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info;
u32 size;
......@@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow =
kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
p_info->mfw_mb_length), GFP_KERNEL);
p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
......@@ -189,8 +183,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
* access is achieved by setting a blocking flag, which will fail other
* competing contexts to send their mailboxes.
*/
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
u32 cmd)
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
spin_lock_bh(&p_hwfn->mcp_info->lock);
......@@ -221,15 +214,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
return 0;
}
static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
u32 cmd)
static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
u8 delay = CHIP_MCP_RESP_ITER_US;
......@@ -326,7 +317,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
} else {
/* FW BUG! */
DP_ERR(p_hwfn, "MFW failed to respond!\n");
DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
cmd, param);
*o_mcp_resp = 0;
rc = -EAGAIN;
}
......@@ -342,7 +334,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
......@@ -399,8 +391,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_load_code)
struct qed_ptt *p_ptt, u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
......@@ -527,8 +518,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
"Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
transceiver_state,
(u32)(p_hwfn->mcp_info->port_addr +
offsetof(struct public_port,
transceiver_data)));
offsetof(struct public_port, transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
ETH_TRANSCEIVER_STATE);
......@@ -540,8 +530,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_reset)
struct qed_ptt *p_ptt, bool b_reset)
{
struct qed_mcp_link_state *p_link;
u8 max_bw, min_bw;
......@@ -557,8 +546,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
"Received link update [0x%08x] from mfw [Addr 0x%x]\n",
status,
(u32)(p_hwfn->mcp_info->port_addr +
offsetof(struct public_port,
link_status)));
offsetof(struct public_port, link_status)));
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
......@@ -755,8 +743,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct public_func *p_data,
int pfid)
struct public_func *p_data, int pfid)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
......@@ -766,8 +753,7 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
memset(p_data, 0, sizeof(*p_data));
size = min_t(u32, sizeof(*p_data),
QED_SECTION_SIZE(mfw_path_offsize));
size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
......@@ -802,15 +788,13 @@ int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
struct public_func shmem_info;
u32 resp = 0, param = 0;
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
MCP_PF_ID(p_hwfn));
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
qed_read_pf_bandwidth(p_hwfn, &shmem_info);
......@@ -943,8 +927,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
int qed_mcp_get_media_type(struct qed_dev *cdev,
u32 *p_media_type)
int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
......@@ -953,7 +936,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
......@@ -1006,15 +989,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_mcp_function_info *info;
struct public_func shmem_info;
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
MCP_PF_ID(p_hwfn));
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
&info->protocol)) {
if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
......@@ -1075,15 +1056,13 @@ struct qed_mcp_link_capabilities
return &p_hwfn->mcp_info->link_capabilities;
}
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_NIG_DRAIN, 1000,
&resp, &param);
DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */
msleep(1020);
......@@ -1092,8 +1071,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_flash_size)
struct qed_ptt *p_ptt, u32 *p_flash_size)
{
u32 flash_size;
......@@ -1171,8 +1149,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_led_mode mode)
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
u32 resp = 0, param = 0, drv_mb_param;
int rc;
......
......@@ -25,9 +25,7 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd,
u8 protocol,
struct qed_sp_init_data *p_data)
u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
......@@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
rc = qed_spq_get_entry(p_hwfn, pp_ent);
if (rc != 0)
if (rc)
return rc;
p_ent = *pp_ent;
......@@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON,
&init_data);
PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
......@@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
......@@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
p_ramrod->outer_tag);
sb, sb_index, p_ramrod->outer_tag);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
......
......@@ -41,8 +41,7 @@
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data,
u8 fw_return_code)
union event_ring_data *data, u8 fw_return_code)
{
struct qed_spq_comp_done *comp_done;
......@@ -109,9 +108,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
p_ent->flags = 0;
......@@ -189,8 +187,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq,
struct qed_spq_entry *p_ent)
struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
u16 echo = qed_chain_get_prod_idx(p_chain);
......@@ -255,8 +252,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
u16 prod)
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
......@@ -267,9 +263,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn,
void *cookie)
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
struct qed_eq *p_eq = cookie;
struct qed_chain *p_chain = &p_eq->chain;
......@@ -323,8 +317,7 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
u16 num_elem)
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
......@@ -348,11 +341,8 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
}
/* register EQ completion on the SP SB */
qed_int_register_cb(p_hwfn,
qed_eq_completion,
p_eq,
&p_eq->eq_sb_index,
&p_eq->p_fw_cons);
qed_int_register_cb(p_hwfn, qed_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
return p_eq;
......@@ -361,14 +351,12 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq)
void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
qed_chain_reset(&p_eq->chain);
}
void qed_eq_free(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq)
void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
if (!p_eq)
return;
......@@ -379,10 +367,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
{
if (IS_VF(p_hwfn->cdev))
return 0;
......@@ -463,8 +450,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
u32 capacity;
/* SPQ struct */
p_spq =
kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
if (!p_spq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
return -ENOMEM;
......@@ -525,9 +511,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
kfree(p_spq);
}
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent)
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
......@@ -538,14 +522,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
if (list_empty(&p_spq->free_pool)) {
p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
if (!p_ent) {
DP_NOTICE(p_hwfn,
"Failed to allocate an SPQ entry for a pending ramrod\n");
rc = -ENOMEM;
goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = list_first_entry(&p_spq->free_pool,
struct qed_spq_entry,
list);
struct qed_spq_entry, list);
list_del(&p_ent->list);
p_ent->queue = &p_spq->pending;
}
......@@ -564,8 +549,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
spin_lock_bh(&p_hwfn->p_spq->lock);
__qed_spq_return_entry(p_hwfn, p_ent);
......@@ -586,10 +570,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
*
* @return int
*/
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
enum spq_priority priority)
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
enum spq_priority priority)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
......@@ -604,8 +587,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_en2;
p_en2 = list_first_entry(&p_spq->free_pool,
struct qed_spq_entry,
list);
struct qed_spq_entry, list);
list_del(&p_en2->list);
/* Copy the ring element physical pointer to the new
......@@ -655,8 +637,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
struct list_head *head,
u32 keep_reserve)
struct list_head *head, u32 keep_reserve)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
int rc;
......@@ -690,8 +671,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
break;
p_ent = list_first_entry(&p_spq->unlimited_pending,
struct qed_spq_entry,
list);
struct qed_spq_entry, list);
if (!p_ent)
return -EINVAL;
......@@ -705,8 +685,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *fw_return_code)
struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
......@@ -803,8 +782,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return -EINVAL;
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
list) {
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
......@@ -846,15 +824,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
if (!found) {
DP_NOTICE(p_hwfn,
"Failed to find an entry this EQE completes\n");
"Failed to find an entry this EQE [echo %04x] completes\n",
le16_to_cpu(echo));
return -EEXIST;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Complete EQE [echo %04x]: func %p cookie %p)\n",
le16_to_cpu(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
else
DP_VERBOSE(p_hwfn,
QED_MSG_SPQ,
"Got a completion without a callback function\n");
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
......@@ -901,14 +886,12 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn,
struct qed_consq *p_consq)
void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
qed_chain_reset(&p_consq->chain);
}
void qed_consq_free(struct qed_hwfn *p_hwfn,
struct qed_consq *p_consq)
void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
if (!p_consq)
return;
......
......@@ -699,7 +699,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
&qzone_id);
reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
qed_wr(p_hwfn, p_ptt, reg_addr, val);
}
}
......@@ -1090,13 +1090,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
if (!(tlvs_mask & BIT(i)))
continue;
resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
qed_iov_vport_to_tlv(p_hwfn, i), size);
if (tlvs_accepted & (1 << i))
if (tlvs_accepted & BIT(i))
resp->hdr.status = status;
else
resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
......@@ -1334,8 +1334,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
pfdev_info->minor_fp_hsi = min_t(u8,
ETH_HSI_VER_MINOR,
pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
......@@ -1438,14 +1437,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
filter.type = QED_FILTER_VLAN;
filter.vlan = p_vf->shadow_config.vlans[i].vid;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
rc = qed_sp_eth_filter_ucast(p_hwfn,
p_vf->opaque_fid,
&filter,
QED_SPQ_MODE_CB, NULL);
rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
&filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to configure VLAN [%04x] to VF [%04x]\n",
......@@ -1463,7 +1459,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
{
int rc = 0;
if ((events & (1 << VLAN_ADDR_FORCED)) &&
if ((events & BIT(VLAN_ADDR_FORCED)) &&
!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
......@@ -1479,7 +1475,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
if (events & (1 << MAC_ADDR_FORCED)) {
if (events & BIT(MAC_ADDR_FORCED)) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
......@@ -1502,7 +1498,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
if (events & (1 << VLAN_ADDR_FORCED)) {
if (events & BIT(VLAN_ADDR_FORCED)) {
struct qed_sp_vport_update_params vport_update;
u8 removal;
int i;
......@@ -1572,7 +1568,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (filter.vlan)
p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
else
p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
}
/* If forced features are terminated, we need to configure the shadow
......@@ -1619,8 +1615,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id],
vf->abs_vf_id, 1);
vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
......@@ -1632,7 +1627,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
* vfs that would still be fine, since they passed '0' as padding].
*/
p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
u8 vf_req = start->only_untagged;
vf_info->bulletin.p_virt->default_only_untagged = vf_req;
......@@ -1652,7 +1647,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.mtu = vf->mtu;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc != 0) {
if (rc) {
DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc);
status = PFVF_STATUS_FAILURE;
......@@ -1679,7 +1674,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->spoof_chk = false;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc != 0) {
if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
rc);
status = PFVF_STATUS_FAILURE;
......@@ -2045,7 +2040,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
/* Ignore the VF request if we're forcing a vlan */
if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
p_data->update_inner_vlan_removal_flg = 1;
p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
}
......@@ -2340,7 +2335,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
/* In forced mode, we're willing to remove entries - but we don't add
* new ones.
*/
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
return 0;
if (p_params->opcode == QED_FILTER_ADD ||
......@@ -2374,7 +2369,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
int i;
/* If we're in forced-mode, we don't allow any change */
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
/* First remove entries and then add new ones */
......@@ -2509,7 +2504,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
}
/* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
/* Once VLAN is forced or PVID is set, do not allow
......@@ -2521,7 +2516,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
(params.type == QED_FILTER_MAC ||
params.type == QED_FILTER_MAC_VLAN)) {
if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
......@@ -2749,7 +2744,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
/* Mark VF for ack and clean pending state */
if (p_vf->state == VF_RESET)
p_vf->state = VF_STOPPED;
ack_vfs[vfid / 32] |= (1 << (vfid % 32));
ack_vfs[vfid / 32] |= BIT((vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
......@@ -2805,7 +2800,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
continue;
vfid = p_vf->abs_vf_id;
if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
u16 rel_vf_id = p_vf->relative_vf_id;
......@@ -3064,8 +3059,7 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
/* Forced MAC will disable MAC_ADDR */
vf_info->bulletin.p_virt->valid_bitmap &=
~(1 << VFPF_BULLETIN_MAC_ADDR);
vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
......@@ -3163,7 +3157,7 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
if (!p_vf || !p_vf->bulletin.p_virt)
return NULL;
if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
return NULL;
return p_vf->bulletin.p_virt->mac;
......@@ -3177,7 +3171,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
if (!p_vf || !p_vf->bulletin.p_virt)
return 0;
if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
return 0;
return p_vf->bulletin.p_virt->pvid;
......
......@@ -440,8 +440,7 @@ static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
edev->dp_module;
return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}
static void qede_set_msglevel(struct net_device *ndev, u32 level)
......@@ -465,8 +464,7 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_params link_params;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
"Link settings are not allowed to be changed\n");
DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
......