Commit 28c66cfa authored by Ajay Sharma, committed by Leon Romanovsky

net: mana: Define data structures for protection domain and memory registration

The MANA hardware supports protection domains and memory registration for use
in an RDMA environment. Add those definitions and expose them for use by the
RDMA driver.
Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-12-git-send-email-longli@linuxonhyperv.com
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent f72ececf
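For context, here is a minimal sketch (not part of this patch) of how a consumer such as the RDMA driver might use the new memory-registration definitions. The helper name example_create_mr and its error handling are hypothetical; it assumes gdma.h declares mana_gd_init_req_hdr() and mana_gd_send_request() as used elsewhere in this diff.

/* Hypothetical example: build a GDMA_CREATE_MR request from the new
 * gdma_create_mr_params and send it to the hardware channel.
 */
static int example_create_mr(struct gdma_context *gc,
			     const struct gdma_create_mr_params *params,
			     gdma_obj_handle_t *mr_handle)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = params->pd_handle;
	req.mr_type = params->mr_type;

	if (params->mr_type == GDMA_MR_TYPE_GVA) {
		/* Guest-virtual-address MRs reference an existing DMA region */
		req.gva.dma_region_handle = params->gva.dma_region_handle;
		req.gva.virtual_address = params->gva.virtual_address;
		req.gva.access_flags = params->gva.access_flags;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	*mr_handle = resp.mr_handle;
	return 0;
}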
@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
 	req.type = queue->type;
 	req.pdid = queue->gdma_dev->pdid;
 	req.doolbell_id = queue->gdma_dev->doorbell;
-	req.gdma_region = queue->mem_info.gdma_region;
+	req.gdma_region = queue->mem_info.dma_region_handle;
 	req.queue_size = queue->queue_size;
 	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
 	req.eq_pci_msix_index = queue->eq.msix_index;
@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
 	queue->id = resp.queue_index;
 	queue->eq.disable_needed = true;
-	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 	return 0;
 }
@@ -671,24 +671,30 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
 	return err;
 }
 
-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+			       gdma_obj_handle_t dma_region_handle)
 {
 	struct gdma_destroy_dma_region_req req = {};
 	struct gdma_general_resp resp = {};
 	int err;
 
-	if (gdma_region == GDMA_INVALID_DMA_REGION)
-		return;
+	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+		return 0;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
 			     sizeof(resp));
-	req.gdma_region = gdma_region;
+	req.dma_region_handle = dma_region_handle;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status)
+	if (err || resp.hdr.status) {
 		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
 			err, resp.hdr.status);
+		return -EPROTO;
+	}
+
+	return 0;
 }
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
 
 static int mana_gd_create_dma_region(struct gdma_dev *gd,
 				     struct gdma_mem_info *gmi)
@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	if (err)
 		goto out;
 
-	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+	if (resp.hdr.status ||
+	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
 		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
 			resp.hdr.status);
 		err = -EPROTO;
 		goto out;
 	}
 
-	gmi->gdma_region = resp.gdma_region;
+	gmi->dma_region_handle = resp.dma_region_handle;
 out:
 	kfree(req);
 	return err;
@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
 		return;
 	}
 
-	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
 	mana_gd_free_memory(gmi);
 	kfree(queue);
 }
...
@@ -1523,10 +1523,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 	memset(&wq_spec, 0, sizeof(wq_spec));
 	memset(&cq_spec, 0, sizeof(cq_spec));
 
-	wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+	wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
 	wq_spec.queue_size = txq->gdma_sq->queue_size;
 
-	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 	cq_spec.queue_size = cq->gdma_cq->queue_size;
 	cq_spec.modr_ctx_id = 0;
 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1541,8 +1541,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 	txq->gdma_sq->id = wq_spec.queue_index;
 	cq->gdma_cq->id = cq_spec.queue_index;
 
-	txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	txq->gdma_sq->mem_info.dma_region_handle =
+		GDMA_INVALID_DMA_REGION;
+	cq->gdma_cq->mem_info.dma_region_handle =
+		GDMA_INVALID_DMA_REGION;
 
 	txq->gdma_txq_id = txq->gdma_sq->id;
@@ -1753,10 +1755,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	memset(&wq_spec, 0, sizeof(wq_spec));
 	memset(&cq_spec, 0, sizeof(cq_spec));
 
-	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
 
-	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 	cq_spec.queue_size = cq->gdma_cq->queue_size;
 	cq_spec.modr_ctx_id = 0;
 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1769,8 +1771,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	rxq->gdma_rq->id = wq_spec.queue_index;
 	cq->gdma_cq->id = cq_spec.queue_index;
 
-	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 
 	rxq->gdma_id = rxq->gdma_rq->id;
 	cq->gdma_id = cq->gdma_cq->id;
...
@@ -29,6 +29,10 @@ enum gdma_request_type {
 	GDMA_CREATE_DMA_REGION		= 25,
 	GDMA_DMA_REGION_ADD_PAGES	= 26,
 	GDMA_DESTROY_DMA_REGION		= 27,
+	GDMA_CREATE_PD			= 29,
+	GDMA_DESTROY_PD			= 30,
+	GDMA_CREATE_MR			= 31,
+	GDMA_DESTROY_MR			= 32,
 };
 
 #define GDMA_RESOURCE_DOORBELL_PAGE	27
@@ -61,6 +65,8 @@ enum {
 	GDMA_DEVICE_MANA	= 2,
 };
 
+typedef u64 gdma_obj_handle_t;
+
 struct gdma_resource {
 	/* Protect the bitmap */
 	spinlock_t lock;
@@ -194,7 +200,7 @@ struct gdma_mem_info {
 	u64 length;
 
 	/* Allocated by the PF driver */
-	u64 gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 };
 
 #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
@@ -618,7 +624,7 @@ struct gdma_create_queue_req {
 	u32 reserved1;
 	u32 pdid;
 	u32 doolbell_id;
-	u64 gdma_region;
+	gdma_obj_handle_t gdma_region;
 	u32 reserved2;
 	u32 queue_size;
 	u32 log2_throttle_limit;
@@ -645,6 +651,28 @@ struct gdma_disable_queue_req {
 	u32 alloc_res_id_on_creation;
 }; /* HW DATA */
 
+enum atb_page_size {
+	ATB_PAGE_SIZE_4K,
+	ATB_PAGE_SIZE_8K,
+	ATB_PAGE_SIZE_16K,
+	ATB_PAGE_SIZE_32K,
+	ATB_PAGE_SIZE_64K,
+	ATB_PAGE_SIZE_128K,
+	ATB_PAGE_SIZE_256K,
+	ATB_PAGE_SIZE_512K,
+	ATB_PAGE_SIZE_1M,
+	ATB_PAGE_SIZE_2M,
+	ATB_PAGE_SIZE_MAX,
+};
+
+enum gdma_mr_access_flags {
+	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
+	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
+	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
+	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
+	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
+};
+
 /* GDMA_CREATE_DMA_REGION */
 struct gdma_create_dma_region_req {
 	struct gdma_req_hdr hdr;
@@ -671,14 +699,14 @@ struct gdma_create_dma_region_req {
 struct gdma_create_dma_region_resp {
 	struct gdma_resp_hdr hdr;
-	u64 gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 }; /* HW DATA */
 
 /* GDMA_DMA_REGION_ADD_PAGES */
 struct gdma_dma_region_add_pages_req {
 	struct gdma_req_hdr hdr;
-	u64 gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 	u32 page_addr_list_len;
 	u32 reserved3;
@@ -690,9 +718,88 @@ struct gdma_dma_region_add_pages_req {
 struct gdma_destroy_dma_region_req {
 	struct gdma_req_hdr hdr;
-	u64 gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 }; /* HW DATA */
 
+enum gdma_pd_flags {
+	GDMA_PD_FLAG_INVALID = 0,
+};
+
+struct gdma_create_pd_req {
+	struct gdma_req_hdr hdr;
+	enum gdma_pd_flags flags;
+	u32 reserved;
+};/* HW DATA */
+
+struct gdma_create_pd_resp {
+	struct gdma_resp_hdr hdr;
+	gdma_obj_handle_t pd_handle;
+	u32 pd_id;
+	u32 reserved;
+};/* HW DATA */
+
+struct gdma_destroy_pd_req {
+	struct gdma_req_hdr hdr;
+	gdma_obj_handle_t pd_handle;
+};/* HW DATA */
+
+struct gdma_destory_pd_resp {
+	struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
+enum gdma_mr_type {
+	/* Guest Virtual Address - MRs of this type allow access
+	 * to memory mapped by PTEs associated with this MR using a virtual
+	 * address that is set up in the MST
+	 */
+	GDMA_MR_TYPE_GVA = 2,
+};
+
+struct gdma_create_mr_params {
+	gdma_obj_handle_t pd_handle;
+	enum gdma_mr_type mr_type;
+	union {
+		struct {
+			gdma_obj_handle_t dma_region_handle;
+			u64 virtual_address;
+			enum gdma_mr_access_flags access_flags;
+		} gva;
+	};
+};
+
+struct gdma_create_mr_request {
+	struct gdma_req_hdr hdr;
+	gdma_obj_handle_t pd_handle;
+	enum gdma_mr_type mr_type;
+	u32 reserved_1;
+
+	union {
+		struct {
+			gdma_obj_handle_t dma_region_handle;
+			u64 virtual_address;
+			enum gdma_mr_access_flags access_flags;
+		} gva;
+	};
+	u32 reserved_2;
+};/* HW DATA */
+
+struct gdma_create_mr_response {
+	struct gdma_resp_hdr hdr;
+	gdma_obj_handle_t mr_handle;
+	u32 lkey;
+	u32 rkey;
+};/* HW DATA */
+
+struct gdma_destroy_mr_request {
+	struct gdma_req_hdr hdr;
+	gdma_obj_handle_t mr_handle;
+};/* HW DATA */
+
+struct gdma_destroy_mr_response {
+	struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
 int mana_gd_verify_vf_version(struct pci_dev *pdev);
 
 int mana_gd_register_device(struct gdma_dev *gd);
@@ -719,4 +826,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi);
 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
 			 u32 resp_len, void *resp);
 
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+			       gdma_obj_handle_t dma_region_handle);
+
 #endif /* _GDMA_H */
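Because mana_gd_destroy_dma_region() is exported with EXPORT_SYMBOL_NS() into the NET_MANA symbol namespace, a module that calls it must import that namespace. A minimal sketch of such a consumer (the example_teardown helper is hypothetical, not part of this patch):

#include <linux/module.h>

MODULE_IMPORT_NS(NET_MANA);

/* Hypothetical example: release a DMA region handle obtained earlier,
 * logging the error code that the newly non-void function now returns.
 */
static void example_teardown(struct gdma_context *gc,
			     gdma_obj_handle_t dma_region_handle)
{
	int err;

	err = mana_gd_destroy_dma_region(gc, dma_region_handle);
	if (err)
		pr_warn("failed to destroy DMA region: %d\n", err);
}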