Commit 01555e64 authored by David S. Miller

Merge tag 'shared-for-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma

Saeed Mahameed says:

====================
Mellanox mlx5 core driver updates 2016-08-20

This series contains several low-level and API updates to the mlx5 core
command interface and mlx5_ifc.h, to be shared as base code for the net-next
and rdma mlx5 4.9 submissions.

From Saeed, ten patches that refactor the old firmware command layouts, which
were generated by hand before mlx5_ifc was introduced. All firmware command
inbox/outbox layouts now use mlx5_ifc, and the old hand-written structures are
removed. On top of those ten patches, two more patches unify the mlx5 command
execution interface and improve the driver log messages in that area.

From Hadar and Ilya, the hardware bits and infrastructure needed for the
minimum inline header setting and for the encap/decap commands and
capabilities required by E-Switch offloads.

This series applies on top of the latest net-next and rdma/master, and merges
smoothly with the "Mellanox 100G mlx5 fixes 2016-08-16" series already applied
to the net branch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2d03d439 575ddf58
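
For readers new to the mlx5_ifc macros, the sketch below distills the
conversion pattern that the diffs in this series apply over and over:
hand-written mailbox structs with explicit big-endian fields and
out.hdr.status checks are replaced by mlx5_ifc-generated layouts accessed
through MLX5_ST_SZ_DW/MLX5_SET, with mlx5_cmd_exec() now folding the firmware
status check into its return value. This is an illustration assembled from
the destroy_cq hunks below, not code from the series itself; the wrapper
names destroy_cq_old() and destroy_cq_new() are invented for the example.

/* Old style: manually generated mailbox layouts, explicit status check. */
static int destroy_cq_old(struct mlx5_core_dev *dev, u32 cqn)
{
        struct mlx5_destroy_cq_mbox_in in;
        struct mlx5_destroy_cq_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
        in.cqn = cpu_to_be32(cqn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
        return 0;
}

/* New style: mlx5_ifc layouts; mlx5_cmd_exec() also checks the status. */
static int destroy_cq_new(struct mlx5_core_dev *dev, u32 cqn)
{
        u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};

        MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, in, cqn, cqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}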
......@@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb,
int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd;
size_t ucmdlen;
int page_shift;
__be64 *pas;
int npages;
int ncont;
void *cqc;
int err;
ucmdlen =
......@@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
}
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index;
......@@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size,
struct mlx5_create_cq_mbox_in **cqb,
int *index, int *inlen)
u32 **cqb, int *index, int *inlen)
{
__be64 *pas;
void *cqc;
int err;
err = mlx5_db_alloc(dev->mdev, &cq->db);
......@@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
init_cq_buf(cq, &cq->buf);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
}
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
mlx5_fill_page_array(&cq->buf.buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uuari.uars[0].index;
return 0;
......@@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
{
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
u32 *cqb = NULL;
void *cqc;
int cqe_size;
unsigned int irqn;
int eqn;
......@@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
cq->cqe_size = cqe_size;
cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
cqb->ctx.cqe_sz_flags |= (1 << 1);
cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
if (err)
goto err_cqb;
cqb->ctx.c_eqn = cpu_to_be16(eqn);
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
cq->cqe_size = cqe_size;
cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err)
......@@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct mlx5_modify_cq_mbox_in *in;
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
int err;
u32 fsel;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
in->cqn = cpu_to_be32(mcq->mcq.cqn);
fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
in->ctx.cq_period = cpu_to_be16(cq_period);
in->ctx.cq_max_count = cpu_to_be16(cq_count);
in->field_select = cpu_to_be32(fsel);
err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
kfree(in);
err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
cq_period, cq_count);
if (err)
mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
......@@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_modify_cq_mbox_in *in;
void *cqc;
u32 *in;
int err;
int npas;
__be64 *pas;
int page_shift;
int inlen;
int uninitialized_var(cqe_size);
......@@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
if (err)
goto ex;
inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto ex_resize;
}
pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
in->pas, 0);
pas, 0);
else
mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
MLX5_MODIFY_CQ_MASK_PG_OFFSET |
MLX5_MODIFY_CQ_MASK_PG_SIZE);
in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
in->ctx.page_offset = 0;
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
in->cqn = cpu_to_be32(cq->mcq.cqn);
mlx5_fill_page_array(&cq->resize_buf->buf, pas);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.resize_field_select.resize_field_select,
MLX5_MODIFY_CQ_MASK_LOG_SIZE |
MLX5_MODIFY_CQ_MASK_PG_OFFSET |
MLX5_MODIFY_CQ_MASK_PG_SIZE);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
......
......@@ -233,23 +233,19 @@ static int set_roce_addr(struct ib_device *device, u8 port_num,
const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
struct mlx5_ib_dev *dev = to_mdev(device);
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
struct mlx5_ib_dev *dev = to_mdev(device);
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
if (ll != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
memset(in, 0, sizeof(in));
ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
......
......@@ -504,7 +504,7 @@ struct mlx5_ib_mr {
int umred;
int npages;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
int live;
void *descs_alloc;
......
......@@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
complete(&cq->free);
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen)
u32 *in, int inlen)
{
int err;
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_create_cq_mbox_out out;
struct mlx5_destroy_cq_mbox_in din;
struct mlx5_destroy_cq_mbox_out dout;
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn);
struct mlx5_eq *eq;
int err;
eq = mlx5_eqn2eq(dev, eqn);
if (IS_ERR(eq))
return PTR_ERR(eq);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
memset(&out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
memset(out, 0, sizeof(out));
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
......@@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
......@@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_destroy_cq_mbox_in in;
struct mlx5_destroy_cq_mbox_out out;
u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
struct mlx5_core_cq *tmp;
int err;
......@@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
return -EINVAL;
}
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
......@@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out)
u32 *out, int outlen)
{
struct mlx5_query_cq_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, sizeof(*out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
return err;
MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
MLX5_SET(query_cq_in, in, cqn, cq->cqn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz)
u32 *in, int inlen)
{
struct mlx5_modify_cq_mbox_out out;
int err;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
if (err)
return err;
u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return 0;
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
......@@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
struct mlx5_modify_cq_mbox_in in;
memset(&in, 0, sizeof(in));
in.cqn = cpu_to_be32(cq->cqn);
in.ctx.cq_period = cpu_to_be16(cq_period);
in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
MLX5_CQ_MODIFY_COUNT);
return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
void *cqc;
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, cq_period, cq_period);
MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.modify_field_select.modify_field_select,
MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
......
......@@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
struct mlx5_query_qp_mbox_out *out;
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
struct mlx5_qp_context *ctx;
u64 param = 0;
u32 *out;
int err;
int no_sq;
out = kzalloc(sizeof(*out), GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
err = mlx5_core_qp_query(dev, qp, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query qp\n");
mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
goto out;
}
*is_str = 0;
ctx = &out->ctx;
/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
switch (index) {
case QP_PID:
param = qp->pid;
......@@ -358,32 +362,32 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
struct mlx5_query_eq_mbox_out *out;
struct mlx5_eq_context *ctx;
int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
u64 param = 0;
void *ctx;
u32 *out;
int err;
out = kzalloc(sizeof(*out), GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
ctx = &out->ctx;
err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
err = mlx5_core_eq_query(dev, eq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
}
ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
switch (index) {
case EQ_NUM_EQES:
param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
break;
case EQ_INTR:
param = ctx->intr;
param = MLX5_GET(eqc, ctx, intr);
break;
case EQ_LOG_PG_SZ:
param = (ctx->log_page_size & 0x1f) + 12;
param = MLX5_GET(eqc, ctx, log_page_size) + 12;
break;
}
......@@ -395,37 +399,37 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index)
{
struct mlx5_query_cq_mbox_out *out;
struct mlx5_cq_context *ctx;
int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
u64 param = 0;
void *ctx;
u32 *out;
int err;
out = kzalloc(sizeof(*out), GFP_KERNEL);
out = mlx5_vzalloc(outlen);
if (!out)
return param;
ctx = &out->ctx;
err = mlx5_core_query_cq(dev, cq, out);
err = mlx5_core_query_cq(dev, cq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
}
ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
switch (index) {
case CQ_PID:
param = cq->pid;
break;
case CQ_NUM_CQES:
param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
break;
case CQ_LOG_PG_SZ:
param = (ctx->log_pg_sz & 0x1f) + 12;
param = MLX5_GET(cqc, ctx, log_page_size);
break;
}
out:
kfree(out);
kvfree(out);
return param;
}
......
......@@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_core_mkey *mkey)
{
struct mlx5_create_mkey_mbox_in *in;
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
int err;
in = mlx5_vzalloc(sizeof(*in));
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
in->seg.flags = MLX5_PERM_LOCAL_WRITE |
MLX5_PERM_LOCAL_READ |
MLX5_ACCESS_MODE_PA;
in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
NULL);
MLX5_SET(mkc, mkc, pd, pdn);
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
kvfree(in);
err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
kvfree(in);
return err;
}
......
......@@ -726,7 +726,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 eth_proto_cap;
u32 eth_proto_admin;
u32 eth_proto_lp;
......@@ -736,7 +736,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
......
......@@ -180,18 +180,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_core_dev *mdev = priv->mdev;
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
memset(out, 0, outlen);
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
......@@ -2022,14 +2019,11 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(create_tis_in)];
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
......@@ -3228,35 +3222,34 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_mkey_seg *mkc;
int inlen = sizeof(*in);
u64 npages =
priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
u64 npages = priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
mkc = &in->seg;
mkc->status = MLX5_MKEY_STATUS_FREE;
mkc->flags = MLX5_PERM_UMR_EN |
MLX5_PERM_LOCAL_READ |
MLX5_PERM_LOCAL_WRITE |
MLX5_ACCESS_MODE_MTT;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
mkc->log2_page_size = PAGE_SHIFT;
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
NULL, NULL);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size,
mlx5e_get_mtt_octw(npages));
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
kvfree(in);
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
kvfree(in);
return err;
}
......
......@@ -416,8 +416,8 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
{
rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
if (!rep->priv_data) {
pr_warn("Failed to create representor for vport %d\n",
rep->vport);
mlx5_core_warn(esw->dev, "Failed to create representor for vport %d\n",
rep->vport);
return -EINVAL;
}
return 0;
......
......@@ -86,23 +86,12 @@ struct cre_des_eq {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
struct mlx5_destroy_eq_mbox_in in;
struct mlx5_destroy_eq_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
in.eqn = eqn;
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (!err)
goto ex;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
ex:
return err;
MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
MLX5_SET(destroy_eq_in, in, eq_number, eqn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
......@@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
__be64 *pas;
void *eqc;
int inlen;
u32 *in;
int err;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
......@@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
init_eq_buf(eq);
inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
memset(&out, 0, sizeof(out));
mlx5_fill_page_array(&eq->buf, in->pas);
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
mlx5_fill_page_array(&eq->buf, pas);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
in->ctx.intr = vecidx;
in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->events_mask = cpu_to_be64(mask);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
MLX5_SET64(create_eq_in, in, event_bitmask, mask);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err)
goto err_in;
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
MLX5_SET(eqc, eqc, uar_page, uar->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
goto err_in;
}
snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
......@@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_query_eq_mbox_out *out, int outlen)
u32 *out, int outlen)
{
struct mlx5_query_eq_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
in.eqn = eq->eqn;
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
if (out->hdr.status)
err = mlx5_cmd_status_to_err(&out->hdr);
return err;
MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
......@@ -87,13 +87,9 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
void *nic_vport_ctx;
int err;
memset(out, 0, sizeof(out));
memset(in, 0, sizeof(in));
MLX5_SET(modify_nic_vport_context_in, in,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
......@@ -116,45 +112,31 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
goto ex;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto ex;
return 0;
ex:
return err;
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
/* E-Switch vport context HW commands */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
u32 *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
u16 *vlan, u8 *qos)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
int err;
bool cvlan_strip;
bool cvlan_insert;
memset(out, 0, sizeof(out));
*vlan = 0;
*qos = 0;
......@@ -188,27 +170,20 @@ static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
void *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
memset(out, 0, sizeof(out));
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
return mlx5_cmd_exec_check_status(dev, in, inlen,
out, sizeof(out));
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
u16 vlan, u8 qos, bool set)
{
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
......@@ -216,7 +191,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
vport, vlan, qos, set);
if (set) {
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
......@@ -241,13 +215,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
u8 *mac, u8 vlan_valid, u16 vlan)
{
u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
u8 *in_mac_addr;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(set_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
MLX5_SET(set_l2_table_entry_in, in, table_index, index);
......@@ -257,23 +228,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
MLX5_SET(delete_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
......@@ -340,7 +306,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
pr_warn("FDB: Failed to alloc match parameters\n");
esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
return NULL;
}
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
......@@ -374,8 +340,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
esw_warn(esw->dev,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
......@@ -1352,8 +1318,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
esw_warn(esw->dev,
"vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
vport->ingress.allow_rule = NULL;
goto out;
}
......@@ -1365,8 +1332,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
vport->ingress.drop_rule = NULL;
goto out;
}
......@@ -1418,8 +1386,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
esw_warn(esw->dev,
"vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
vport->egress.allowed_vlan = NULL;
goto out;
}
......@@ -1432,8 +1401,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
esw_warn(esw->dev,
"vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
......@@ -1903,7 +1873,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
int err = 0;
u32 *out;
......@@ -1916,8 +1886,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if (!out)
return -ENOMEM;
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
......
......@@ -88,4 +88,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u16 id,
u64 *packets, u64 *bytes);
int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id);
void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
#endif
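
The two declarations above are the new E-Switch offload encap API mentioned
in the cover letter. Below is a minimal usage sketch, under the assumption
that header_type takes one of the MLX5_HEADER_TYPE_* values added by this
series' ifc bits and that encap_header points to size bytes of raw
packet-header data; the caller itself is hypothetical and not part of the
series.

/* Hypothetical caller: allocate an encap context for a prebuilt VXLAN
 * header, use the returned encap_id in a flow rule, then release it.
 * MLX5_HEADER_TYPE_VXLAN is assumed to come from the new ifc bits.
 */
static int example_encap_roundtrip(struct mlx5_core_dev *dev,
                                   void *header, size_t size)
{
        u32 encap_id;
        int err;

        err = mlx5_cmd_alloc_encap(dev, MLX5_HEADER_TYPE_VXLAN, size,
                                   header, &encap_id);
        if (err)
                return err;

        /* ... attach encap_id to an FDB flow rule here ... */

        mlx5_cmd_dealloc_encap(dev, encap_id);
        return 0;
}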
......@@ -345,7 +345,7 @@ static void del_flow_table(struct fs_node *node)
err = mlx5_cmd_destroy_flow_table(dev, ft);
if (err)
pr_warn("flow steering can't destroy ft\n");
mlx5_core_warn(dev, "flow steering can't destroy ft\n");
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
}
......@@ -364,7 +364,7 @@ static void del_rule(struct fs_node *node)
match_value = mlx5_vzalloc(match_len);
if (!match_value) {
pr_warn("failed to allocate inbox\n");
mlx5_core_warn(dev, "failed to allocate inbox\n");
return;
}
......@@ -387,8 +387,9 @@ static void del_rule(struct fs_node *node)
modify_mask,
fte);
if (err)
pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
}
kvfree(match_value);
}
......@@ -409,8 +410,9 @@ static void del_fte(struct fs_node *node)
err = mlx5_cmd_delete_fte(dev, ft,
fte->index);
if (err)
pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
fte->status = 0;
fg->num_ftes--;
......@@ -427,8 +429,8 @@ static void del_flow_group(struct fs_node *node)
dev = get_dev(&ft->node);
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
pr_warn("flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
}
static struct fs_fte *alloc_fte(u8 action,
......
......@@ -38,13 +38,10 @@
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_board_id(struct mlx5_core_dev *dev)
......@@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_init_hca_mbox_in in;
struct mlx5_cmd_init_hca_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
return err;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_teardown_hca_mbox_in in;
struct mlx5_cmd_teardown_hca_mbox_out out;
int err;
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
......@@ -39,36 +39,33 @@
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;
struct mlx5_mad_ifc_mbox_out *out = NULL;
int err;
int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
int err = -ENOMEM;
void *data;
void *resp;
u32 *out;
u32 *in;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out) {
err = -ENOMEM;
in = kzalloc(inlen, GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
}
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
in->hdr.opmod = cpu_to_be16(opmod);
in->port = port;
MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
MLX5_SET(mad_ifc_in, in, op_mod, opmod);
MLX5_SET(mad_ifc_in, in, port, port);
memcpy(in->data, inb, sizeof(in->data));
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out;
}
memcpy(outb, out->data, sizeof(out->data));
resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
memcpy(outb, resp,
MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);
......
......@@ -324,7 +324,7 @@ enum {
MLX5_DEV_CAP_FLAG_DCT,
};
static u16 to_fw_pkey_sz(u32 size)
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
case 128:
......@@ -340,7 +340,7 @@ static u16 to_fw_pkey_sz(u32 size)
case 4096:
return 5;
default:
pr_warn("invalid pkey table size %d\n", size);
mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
return 0;
}
}
......@@ -363,10 +363,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto query_ex;
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
......@@ -409,20 +405,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
int err;
memset(out, 0, sizeof(out));
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
if (err)
return err;
err = mlx5_cmd_status_to_err_v2(out);
return err;
return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
......@@ -490,7 +477,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
to_fw_pkey_sz(128));
to_fw_pkey_sz(dev, 128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
......@@ -528,37 +515,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
int err;
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
memset(in, 0, sizeof(in));
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
return mlx5_cmd_status_to_err_v2(out);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
int err;
u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
memset(in, 0, sizeof(in));
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
return mlx5_cmd_status_to_err_v2(out);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
......@@ -758,44 +730,40 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
int err;
u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
u32 sup_issi;
memset(query_in, 0, sizeof(query_in));
memset(query_out, 0, sizeof(query_out));
int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
query_out, sizeof(query_out));
err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
query_out, sizeof(query_out));
if (err) {
if (((struct mlx5_outbox_hdr *)query_out)->status ==
MLX5_CMD_STAT_BAD_OP_ERR) {
u32 syndrome;
u8 status;
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
return 0;
}
pr_err("failed to query ISSI\n");
pr_err("failed to query ISSI err(%d)\n", err);
return err;
}
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
memset(set_in, 0, sizeof(set_in));
memset(set_out, 0, sizeof(set_out));
u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
set_out, sizeof(set_out));
err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
set_out, sizeof(set_out));
if (err) {
pr_err("failed to set ISSI=1\n");
pr_err("failed to set ISSI=1 err(%d)\n", err);
return err;
}
......@@ -1344,8 +1312,9 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
pr_warn("selected profile out of range, selecting default (%d)\n",
MLX5_DEFAULT_PROF);
mlx5_core_warn(dev,
"selected profile out of range, selecting default (%d)\n",
MLX5_DEFAULT_PROF);
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
......
......@@ -37,70 +37,30 @@
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
struct mlx5_attach_mcg_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
__be32 rsvd;
u8 gid[16];
};
struct mlx5_attach_mcg_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvf[8];
};
struct mlx5_detach_mcg_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
__be32 rsvd;
u8 gid[16];
};
struct mlx5_detach_mcg_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvf[8];
};
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
struct mlx5_attach_mcg_mbox_in in;
struct mlx5_attach_mcg_mbox_out out;
int err;
u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
void *gid;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
struct mlx5_detach_mcg_mbox_in in;
struct mlx5_detach_mcg_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
void *gid;
return err;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
......@@ -58,8 +58,8 @@ do { \
} while (0)
#define mlx5_core_err(__dev, format, ...) \
dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
(__dev)->priv.name, __func__, __LINE__, current->pid, \
dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
......@@ -75,19 +75,6 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
int in_size, u32 *out,
int out_size)
{
int err;
err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
if (err)
return err;
return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
......
......@@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
{
}
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
struct mlx5_create_mkey_mbox_out *out)
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen,
u32 *out, int outlen,
mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
struct mlx5_create_mkey_mbox_out lout;
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
u32 mkey_index;
void *mkc;
int err;
u8 key;
memset(&lout, 0, sizeof(lout));
spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
spin_unlock_irq(&dev->priv.mkey_lock);
in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
if (callback) {
err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
callback, context);
return err;
} else {
err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
}
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
if (err) {
mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err;
}
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
MLX5_SET(mkc, mkc, mkey_7_0, key);
if (lout.hdr.status) {
mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
return mlx5_cmd_status_to_err(&lout.hdr);
}
if (callback)
return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
callback, context);
err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
if (err)
return err;
mkey->iova = be64_to_cpu(in->seg.start_addr);
mkey->size = be64_to_cpu(in->seg.len);
mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
mkey->pd = MLX5_GET(mkc, mkc, pd);
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
be32_to_cpu(lout.mkey), key, mkey->key);
mkey_index, key, mkey->key);
/* connect to mkey tree */
write_lock_irq(&table->lock);
......@@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen)
{
return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
NULL, 0, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
struct mlx5_destroy_mkey_mbox_in in;
struct mlx5_destroy_mkey_mbox_out out;
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
write_lock_irqsave(&table->lock, flags);
deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
......@@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
return -ENOENT;
}
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
struct mlx5_query_mkey_mbox_out *out, int outlen)
u32 *out, int outlen)
{
struct mlx5_query_mkey_mbox_in in;
int err;
u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey)
{
struct mlx5_query_special_ctxs_mbox_in in;
struct mlx5_query_special_ctxs_mbox_out out;
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
*mkey = be32_to_cpu(out.dump_fill_mkey);
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*mkey = MLX5_GET(query_special_contexts_out, out,
dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
static inline u32 mlx5_get_psv(u32 *out, int psv_index)
{
switch (psv_index) {
case 1: return MLX5_GET(create_psv_out, out, psv1_index);
case 2: return MLX5_GET(create_psv_out, out, psv2_index);
case 3: return MLX5_GET(create_psv_out, out, psv3_index);
default: return MLX5_GET(create_psv_out, out, psv0_index);
}
}
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
struct mlx5_allocate_psv_in in;
struct mlx5_allocate_psv_out out;
u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
return -EINVAL;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
MLX5_SET(create_psv_in, in, pd, pdn);
MLX5_SET(create_psv_in, in, num_psv, npsvs);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) {
mlx5_core_err(dev, "cmd exec failed %d\n", err);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
}
if (out.hdr.status) {
mlx5_core_err(dev, "create_psv bad status %d\n",
out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
for (i = 0; i < npsvs; i++)
sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
sig_index[i] = mlx5_get_psv(out, i);
return err;
}
......@@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
struct mlx5_destroy_psv_in in;
struct mlx5_destroy_psv_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
in.psv_number = cpu_to_be32(psv_num);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) {
mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
goto out;
}
if (out.hdr.status) {
mlx5_core_err(dev, "destroy_psv bad status %d\n",
out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
out:
return err;
MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
MLX5_SET(destroy_psv_in, in, psvn, psv_num);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
......@@ -44,12 +44,6 @@ enum {
MLX5_PAGES_TAKE = 2
};
enum {
MLX5_BOOT_PAGES = 1,
MLX5_INIT_PAGES = 2,
MLX5_POST_INIT_PAGES = 3
};
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
......@@ -67,33 +61,6 @@ struct fw_page {
unsigned free_count;
};
struct mlx5_query_pages_inbox {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr;
__be16 rsvd;
__be16 func_id;
__be32 num_pages;
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
__be16 rsvd;
__be16 func_id;
__be32 num_entries;
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
__be32 num_entries;
u8 rsvd[4];
__be64 pas[0];
};
enum {
MAX_RECLAIM_TIME_MSECS = 5000,
MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
......@@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out;
u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
MLX5_SET(query_pages_in, in, op_mod, boot ?
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
*npages = be32_to_cpu(out.num_pages);
*func_id = be16_to_cpu(out.func_id);
*npages = MLX5_GET(query_pages_out, out, num_pages);
*func_id = MLX5_GET(query_pages_out, out, function_id);
return err;
}
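
Two things make this hunk mechanical. The removed MLX5_BOOT_PAGES/MLX5_INIT_PAGES enum is superseded by the generated MLX5_QUERY_PAGES_IN_OP_MOD_* constants, and the explicit be32_to_cpu()/be16_to_cpu() calls go away because MLX5_GET byte-swaps and masks to the field width declared in mlx5_ifc.h. Its definition in include/linux/mlx5/device.h is roughly the following (quoted from memory; check the header for the authoritative form):

#define MLX5_GET(typ, p, fld)                                          \
	((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) >>   \
	  __mlx5_dw_bit_off(typ, fld)) & __mlx5_mask(typ, fld))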
......@@ -280,46 +244,37 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
in->func_id = cpu_to_be16(func_id);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
if (!err)
err = mlx5_cmd_status_to_err(&out.hdr);
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
mlx5_core_warn(dev, "page notify failed\n");
kfree(in);
mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
func_id, err);
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
int inlen;
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
int err;
u32 *in;
int i;
inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
goto out_free;
}
memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
retry:
......@@ -332,27 +287,21 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
goto retry;
}
in->pas[i] = cpu_to_be64(addr);
MLX5_SET64(manage_pages_in, in, pas[i], addr);
}
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_4k;
}
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
func_id, npages, out.hdr.status);
goto out_4k;
}
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
......@@ -364,7 +313,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
if (notify_fail)
......@@ -373,8 +322,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
}
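
Commands that carry a trailing pas[] array cannot live on the stack, so give_pages() sizes the inbox at runtime from the generated layout: the fixed header via MLX5_ST_SZ_BYTES(manage_pages_in) plus one MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]) per page, allocated with mlx5_vzalloc() and filled with MLX5_SET64. A condensed sketch of that pattern, with the retry loop, page accounting, and failure unwinding of the real function trimmed away:

static int example_give_pages(struct mlx5_core_dev *dev, u16 func_id,
			      u64 *addrs, int npages)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u32 *in;
	int err;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = mlx5_vzalloc(inlen);	/* may fall back to vmalloc */
	if (!in)
		return -ENOMEM;

	for (i = 0; i < npages; i++)
		MLX5_SET64(manage_pages_in, in, pas[i], addrs[i]);

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kvfree(in);
	return err;
}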
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct mlx5_manage_pages_inbox *in, int in_size,
struct mlx5_manage_pages_outbox *out, int out_size)
u32 *in, int in_size, u32 *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
......@@ -382,55 +330,54 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 i = 0;
if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
(u32 *)out, out_size);
return mlx5_cmd_exec(dev, in, in_size, out, out_size);
npages = be32_to_cpu(in->num_entries);
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
out->pas[i] = cpu_to_be64(fwp->addr);
MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
p = rb_next(p);
i++;
}
out->num_entries = cpu_to_be32(i);
MLX5_SET(manage_pages_out, out, output_num_entries, i);
return 0;
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
struct mlx5_manage_pages_inbox in;
struct mlx5_manage_pages_outbox *out;
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int num_claimed;
int outlen;
u64 addr;
u32 *out;
int err;
int i;
if (nclaimed)
*nclaimed = 0;
memset(&in, 0, sizeof(in));
outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = mlx5_vzalloc(outlen);
if (!out)
return -ENOMEM;
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
num_claimed = be32_to_cpu(out->num_entries);
num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
if (num_claimed > npages) {
mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
num_claimed, npages);
......@@ -438,10 +385,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
if (nclaimed)
*nclaimed = num_claimed;
......@@ -518,8 +464,8 @@ static int optimal_reclaimed_pages(void)
int ret;
ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
sizeof(struct mlx5_manage_pages_outbox)) /
FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
MLX5_ST_SZ_BYTES(manage_pages_out)) /
MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
return ret;
}
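
optimal_reclaimed_pages() keeps the same arithmetic, only expressed with the generated size macros: pas[] entries per reclaim mailbox = (mailbox bytes available - fixed manage_pages_out header) / bytes per pas entry. A worked instance, with the mailbox capacity and header size assumed for illustration (the real values come from the command-layout and mailbox-block definitions):

	int avail = 960;	/* assumed mailbox bytes, illustration only */
	int hdr  = MLX5_ST_SZ_BYTES(manage_pages_out);		/* fixed header */
	int per  = MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);	/* one address  */
	int n    = (avail - hdr) / per;	/* with a 16-byte header: (960 - 16) / 8 = 118 */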
......
......@@ -36,66 +36,27 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
struct mlx5_alloc_pd_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_alloc_pd_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 pdn;
u8 rsvd[4];
};
struct mlx5_dealloc_pd_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 pdn;
u8 rsvd[4];
};
struct mlx5_dealloc_pd_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
struct mlx5_alloc_pd_mbox_in in;
struct mlx5_alloc_pd_mbox_out out;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
*pdn = be32_to_cpu(out.pdn) & 0xffffff;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
}
EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
struct mlx5_dealloc_pd_mbox_in in;
struct mlx5_dealloc_pd_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
in.pdn = cpu_to_be32(pdn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
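
With the status check folded into mlx5_cmd_exec(), the PD API surface is unchanged for callers. A usage sketch, assuming "mdev" is an already-probed mlx5_core_dev:

	u32 pdn;
	int err = mlx5_core_alloc_pd(mdev, &pdn);

	if (!err) {
		/* ... use pdn for MKey/QP creation ... */
		mlx5_core_dealloc_pd(mdev, pdn);
	}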
......@@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
MLX5_SET(set_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_RATE_LIMIT);
MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
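
A small, repeated cleanup throughout these files: the declare-then-memset() pairs collapse into "= {0}" initializers, which zero the command buffers at definition:

	/* before */
	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
	memset(in, 0, sizeof(in));

	/* after */
	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};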
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
......
......@@ -155,13 +155,13 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pdev))
pci_disable_sriov(pdev);
else
pr_info("unloading PF driver while leaving orphan VFs\n");
mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n");
return 0;
}
err = mlx5_core_sriov_enable(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err);
return err;
}
......@@ -180,7 +180,8 @@ static int sync_required(struct pci_dev *pdev)
int cur_vfs = pci_num_vf(pdev);
if (cur_vfs != sriov->num_vfs) {
pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs);
mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n",
cur_vfs, sriov->num_vfs);
return 1;
}
......
......@@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
sizeof(create_out));
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
if (!err)
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
......@@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
......@@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
......@@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_srq_in, srq_in, opcode,
MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
srq_out,
MLX5_ST_SZ_BYTES(query_srq_out));
err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
if (err)
goto out;
......@@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(create_out, 0, sizeof(create_out));
err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
sizeof(create_out));
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
if (err)
goto out;
......@@ -286,36 +285,30 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq, u16 lwm)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
......@@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out,
MLX5_ST_SZ_BYTES(query_xrc_srq_out));
err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (err)
goto out;
......
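
All of the SRQ commands above lose their mlx5_cmd_exec_check_status() wrapper. Before this series that helper was roughly the following (a reconstruction, not the verbatim source); its status-to-errno translation now happens inside mlx5_cmd_exec() itself, which is what lets these hunks drop it mechanically:

static int old_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
				     int in_size, u32 *out, int out_size)
{
	int err = mlx5_cmd_exec(dev, in, in_size, out, out_size);

	if (err)
		return err;
	/* translate any firmware status/syndrome in the outbox header */
	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}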
......@@ -42,73 +42,28 @@ enum {
NUM_LOW_LAT_UUARS = 4,
};
struct mlx5_alloc_uar_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_alloc_uar_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 uarn;
u8 rsvd[4];
};
struct mlx5_free_uar_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 uarn;
u8 rsvd[4];
};
struct mlx5_free_uar_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
struct mlx5_alloc_uar_mbox_in in;
struct mlx5_alloc_uar_mbox_out out;
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
goto ex;
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
goto ex;
}
*uarn = be32_to_cpu(out.uarn) & 0xffffff;
ex:
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
struct mlx5_free_uar_mbox_in in;
struct mlx5_free_uar_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
in.uarn = cpu_to_be32(uarn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
goto ex;
u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
ex:
return err;
MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
......
......@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen);
u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz);
u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
......
(Four file diffs are collapsed in this view and not shown.)
......@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
......
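
The new mlx5_modify_nic_vport_min_inline() prototype backs the E-Switch minimum-inline-header work mentioned in the cover letter. A hedged usage sketch; MLX5_INLINE_MODE_L2 is assumed from the matching mlx5_ifc additions in this series, so verify the exact enum name in the merged header:

	/* Ask the NIC to require at least the L2 header inline for VF vport 1.
	 * "mdev" is assumed to be an already-initialized mlx5_core_dev. */
	int err = mlx5_modify_nic_vport_min_inline(mdev, 1, MLX5_INLINE_MODE_L2);

	if (err)
		mlx5_core_warn(mdev, "min_inline update failed %d\n", err);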