Commit cd23b14b authored by Eli Cohen, committed by Roland Dreier

mlx5_core: Implement new initialization sequence

Introduce enable_hca and disable_hca commands to signify when the
driver starts or ceases to operate on the device.

In addition, the driver now uses separate boot and init page counts;
boot pages are required for the firmware to complete boot commands,
and init pages to complete the INIT_HCA command.  The command
interface revision is bumped to 4 to enforce use of supported
firmware.

This patch breaks compatibility with old versions of firmware (< 4);
however, the first GA firmware we will publish will support version 4
so this should not be a problem.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 11940c87
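
For orientation, the resulting bring-up order in mlx5_dev_init() can be sketched as below. This is a simplified illustration only: error handling and unrelated setup steps are omitted, and sketch_init_sequence() is a hypothetical name rather than a function added by this patch; the authoritative changes are in the diff that follows.

static int sketch_init_sequence(struct mlx5_core_dev *dev)
{
	/* Tell firmware the driver is taking ownership of the device. */
	mlx5_core_enable_hca(dev);

	/* Hand firmware the pages it needs to run boot commands. */
	mlx5_satisfy_startup_pages(dev, 1);	/* boot pages */

	set_hca_ctrl(dev);
	handle_hca_cap(dev);

	/* Hand firmware the pages it needs to complete INIT_HCA. */
	mlx5_satisfy_startup_pages(dev, 0);	/* init pages */

	mlx5_pagealloc_start(dev);
	return mlx5_cmd_init_hca(dev);
}

On teardown, and on any failure after boot pages have been handed out, the driver reclaims the startup pages and issues DISABLE_HCA via mlx5_core_disable_hca() before cleaning up the command interface.
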
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-	CMD_IF_REV = 3,
+	CMD_IF_REV = 4,
 };
 
 enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
 	case MLX5_CMD_OP_TEARDOWN_HCA:
 		return "TEARDOWN_HCA";
 
+	case MLX5_CMD_OP_ENABLE_HCA:
+		return "MLX5_CMD_OP_ENABLE_HCA";
+
+	case MLX5_CMD_OP_DISABLE_HCA:
+		return "MLX5_CMD_OP_DISABLE_HCA";
+
 	case MLX5_CMD_OP_QUERY_PAGES:
 		return "QUERY_PAGES";
...
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
 	return err;
 }
 
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+	int err;
+	struct mlx5_enable_hca_mbox_in in;
+	struct mlx5_enable_hca_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return mlx5_cmd_status_to_err(&out.hdr);
+
+	return 0;
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+	int err;
+	struct mlx5_disable_hca_mbox_in in;
+	struct mlx5_disable_hca_mbox_out out;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		return mlx5_cmd_status_to_err(&out.hdr);
+
+	return 0;
+}
+
 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 	}
 
 	mlx5_pagealloc_init(dev);
+
+	err = mlx5_core_enable_hca(dev);
+	if (err) {
+		dev_err(&pdev->dev, "enable hca failed\n");
+		goto err_pagealloc_cleanup;
+	}
+
+	err = mlx5_satisfy_startup_pages(dev, 1);
+	if (err) {
+		dev_err(&pdev->dev, "failed to allocate boot pages\n");
+		goto err_disable_hca;
+	}
+
 	err = set_hca_ctrl(dev);
 	if (err) {
 		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
-		goto err_pagealloc_cleanup;
+		goto reclaim_boot_pages;
 	}
 
 	err = handle_hca_cap(dev);
 	if (err) {
 		dev_err(&pdev->dev, "handle_hca_cap failed\n");
-		goto err_pagealloc_cleanup;
+		goto reclaim_boot_pages;
 	}
 
-	err = mlx5_satisfy_startup_pages(dev);
+	err = mlx5_satisfy_startup_pages(dev, 0);
 	if (err) {
-		dev_err(&pdev->dev, "failed to allocate startup pages\n");
-		goto err_pagealloc_cleanup;
+		dev_err(&pdev->dev, "failed to allocate init pages\n");
+		goto reclaim_boot_pages;
 	}
 
 	err = mlx5_pagealloc_start(dev);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
-		goto err_reclaim_pages;
+		goto reclaim_boot_pages;
 	}
 
 	err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 err_pagealloc_stop:
 	mlx5_pagealloc_stop(dev);
 
-err_reclaim_pages:
+reclaim_boot_pages:
 	mlx5_reclaim_startup_pages(dev);
 
+err_disable_hca:
+	mlx5_core_disable_hca(dev);
+
 err_pagealloc_cleanup:
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	mlx5_cmd_teardown_hca(dev);
 	mlx5_pagealloc_stop(dev);
 	mlx5_reclaim_startup_pages(dev);
+	mlx5_core_disable_hca(dev);
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
 	iounmap(dev->iseg);
...
@@ -64,7 +64,7 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	u8			reserved[2];
+	__be16			num_boot_pages;
 	__be16			func_id;
 	__be16			init_pages;
 	__be16			num_pages;
@@ -146,7 +146,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-				s16 *pages, s16 *init_pages)
+				s16 *pages, s16 *init_pages, u16 *boot_pages)
 {
 	struct mlx5_query_pages_inbox	in;
 	struct mlx5_query_pages_outbox	out;
@@ -164,8 +164,13 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	if (pages)
 		*pages = be16_to_cpu(out.num_pages);
+
 	if (init_pages)
 		*init_pages = be16_to_cpu(out.init_pages);
+
+	if (boot_pages)
+		*boot_pages = be16_to_cpu(out.num_boot_pages);
+
 	*func_id = be16_to_cpu(out.func_id);
 
 	return err;
@@ -357,19 +362,22 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 	queue_work(dev->priv.pg_wq, &req->work);
 }
 
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
+	u16 uninitialized_var(boot_pages);
 	s16 uninitialized_var(init_pages);
 	u16 uninitialized_var(func_id);
 	int err;
 
-	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
+	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
+				   &boot_pages);
 	if (err)
 		return err;
 
-	mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
-	return give_pages(dev, func_id, init_pages, 0);
+	mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
+		      init_pages, boot_pages, func_id);
+	return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
...
@@ -690,6 +690,26 @@ struct mlx5_query_cq_mbox_out {
 	__be64			pas[0];
 };
 
+struct mlx5_enable_hca_mbox_in {
+	struct mlx5_inbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_in {
+	struct mlx5_inbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
 struct mlx5_eq_context {
 	u8			status;
 	u8			ec_oi;
...
@@ -101,6 +101,8 @@ enum {
 	MLX5_CMD_OP_QUERY_ADAPTER	= 0x101,
 	MLX5_CMD_OP_INIT_HCA		= 0x102,
 	MLX5_CMD_OP_TEARDOWN_HCA	= 0x103,
+	MLX5_CMD_OP_ENABLE_HCA		= 0x104,
+	MLX5_CMD_OP_DISABLE_HCA		= 0x105,
 	MLX5_CMD_OP_QUERY_PAGES		= 0x107,
 	MLX5_CMD_OP_MANAGE_PAGES	= 0x108,
 	MLX5_CMD_OP_SET_HCA_CAP		= 0x109,
@@ -690,7 +692,7 @@ int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 				 s16 npages);
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
...