Commit 2485c12c authored by David Yat Sin, committed by Alex Deucher

drm/amdkfd: CRIU restore sdma id for queues

When re-creating queues during CRIU restore, restore the queue with the
same sdma id value used during CRIU dump.
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: David Yat Sin <david.yatsin@amd.com>
Signed-off-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8668dfc3
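Below is a small standalone sketch of the allocation policy this patch introduces, written as plain userspace C rather than the kernel code itself; the helper name alloc_sdma_id and the simplified types are illustrative only, and __builtin_ctzll (a GCC/Clang builtin) stands in for the kernel's __ffs64. A 64-bit bitmap tracks free SDMA ids: a normal create takes the first free bit, while a CRIU restore passes the id saved at dump time and fails with -EBUSY if that id is no longer free.

/* Illustrative sketch only -- not the kernel implementation. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Allocate an SDMA id from a 64-bit free-bitmap. If restore_id is
 * non-NULL (CRIU restore), claim exactly that id or fail with -EBUSY;
 * otherwise take the first free id, as on a normal queue create.
 */
static int alloc_sdma_id(uint64_t *bitmap, const uint32_t *restore_id,
                         uint32_t *out_id)
{
        if (!*bitmap)
                return -ENOMEM;         /* no SDMA queue left */

        if (restore_id) {
                if (*restore_id >= 64 || !(*bitmap & (1ULL << *restore_id)))
                        return -EBUSY;  /* saved id is no longer free */
                *bitmap &= ~(1ULL << *restore_id);
                *out_id = *restore_id;
        } else {
                uint32_t bit = (uint32_t)__builtin_ctzll(*bitmap);

                *bitmap &= ~(1ULL << bit);
                *out_id = bit;
        }
        return 0;
}

int main(void)
{
        uint64_t bitmap = ~0ULL;        /* all 64 ids start out free */
        uint32_t id, saved = 5;         /* pretend id 5 was recorded at dump */

        alloc_sdma_id(&bitmap, NULL, &id);      /* normal create -> id 0 */
        printf("fresh create got id %u\n", id);

        alloc_sdma_id(&bitmap, &saved, &id);    /* restore path -> id 5 */
        printf("restore got id %u\n", id);
        return 0;
}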
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

@@ -58,7 +58,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
                 struct queue *q);
 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
 static int allocate_sdma_queue(struct device_queue_manager *dqm,
-                struct queue *q);
+                struct queue *q, const uint32_t *restore_sdma_id);
 static void kfd_process_hw_exception(struct work_struct *work);
 
 static inline
@@ -299,7 +299,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 
 static int create_queue_nocpsch(struct device_queue_manager *dqm,
                 struct queue *q,
-                struct qcm_process_device *qpd)
+                struct qcm_process_device *qpd,
+                const struct kfd_criu_queue_priv_data *qd)
 {
         struct mqd_manager *mqd_mgr;
         int retval;
@@ -339,7 +340,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
                         q->pipe, q->queue);
         } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                         q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-                retval = allocate_sdma_queue(dqm, q);
+                retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
                 if (retval)
                         goto deallocate_vmid;
                 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
@@ -1034,7 +1035,7 @@ static void pre_reset(struct device_queue_manager *dqm)
 }
 
 static int allocate_sdma_queue(struct device_queue_manager *dqm,
-                struct queue *q)
+                struct queue *q, const uint32_t *restore_sdma_id)
 {
         int bit;
 
@@ -1044,9 +1045,21 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
                         return -ENOMEM;
                 }
 
-                bit = __ffs64(dqm->sdma_bitmap);
-                dqm->sdma_bitmap &= ~(1ULL << bit);
-                q->sdma_id = bit;
+                if (restore_sdma_id) {
+                        /* Re-use existing sdma_id */
+                        if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
+                                pr_err("SDMA queue already in use\n");
+                                return -EBUSY;
+                        }
+                        dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+                        q->sdma_id = *restore_sdma_id;
+                } else {
+                        /* Find first available sdma_id */
+                        bit = __ffs64(dqm->sdma_bitmap);
+                        dqm->sdma_bitmap &= ~(1ULL << bit);
+                        q->sdma_id = bit;
+                }
+
                 q->properties.sdma_engine_id = q->sdma_id %
                                 kfd_get_num_sdma_engines(dqm->dev);
                 q->properties.sdma_queue_id = q->sdma_id /
@@ -1056,9 +1069,19 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
                         pr_err("No more XGMI SDMA queue to allocate\n");
                         return -ENOMEM;
                 }
-                bit = __ffs64(dqm->xgmi_sdma_bitmap);
-                dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
-                q->sdma_id = bit;
+                if (restore_sdma_id) {
+                        /* Re-use existing sdma_id */
+                        if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
+                                pr_err("SDMA queue already in use\n");
+                                return -EBUSY;
+                        }
+                        dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+                        q->sdma_id = *restore_sdma_id;
+                } else {
+                        bit = __ffs64(dqm->xgmi_sdma_bitmap);
+                        dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
+                        q->sdma_id = bit;
+                }
                 /* sdma_engine_id is sdma id including
                  * both PCIe-optimized SDMAs and XGMI-
                  * optimized SDMAs. The calculation below
@@ -1288,7 +1311,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 }
 
 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-                struct qcm_process_device *qpd)
+                struct qcm_process_device *qpd,
+                const struct kfd_criu_queue_priv_data *qd)
 {
         int retval;
         struct mqd_manager *mqd_mgr;
@@ -1303,7 +1327,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
         if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                 dqm_lock(dqm);
-                retval = allocate_sdma_queue(dqm, q);
+                retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
                 dqm_unlock(dqm);
                 if (retval)
                         goto out;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h

@@ -88,7 +88,8 @@ struct device_process_node {
 struct device_queue_manager_ops {
         int     (*create_queue)(struct device_queue_manager *dqm,
                                 struct queue *q,
-                                struct qcm_process_device *qpd);
+                                struct qcm_process_device *qpd,
+                                const struct kfd_criu_queue_priv_data *qd);
 
         int     (*destroy_queue)(struct device_queue_manager *dqm,
                                 struct qcm_process_device *qpd,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c

@@ -272,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                         goto err_create_queue;
                 pqn->q = q;
                 pqn->kq = NULL;
-                retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
+                retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data);
                 print_queue(q);
                 break;
 
@@ -292,7 +292,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                         goto err_create_queue;
                 pqn->q = q;
                 pqn->kq = NULL;
-                retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
+                retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data);
                 print_queue(q);
                 break;
         case KFD_QUEUE_TYPE_DIQ: