Commit 56c5977e authored by Graham Sider, committed by Alex Deucher

drm/amdkfd: replace/remove remaining kgd_dev references

Remove get_amdgpu_device() and the remaining kgd_dev references, except
for the kgd declaration, the kfd struct entry, and its initialization.
Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent dff63da9
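The change is mechanical throughout: every place that recovered an amdgpu_device by casting the opaque kgd_dev handle now reads the typed adev pointer that struct kfd_dev already carries. A minimal before/after sketch of the pattern (the kfd_dev_before/kfd_dev_after names are illustrative only, not the real kfd_priv.h definitions):

struct amdgpu_device;	/* defined by the amdgpu core */
struct kgd_dev;		/* opaque alias for amdgpu_device, being retired */

/* Before: an opaque handle plus a cast helper in every consumer file. */
struct kfd_dev_before {
	struct kgd_dev *kgd;
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;	/* works only by pointer identity */
}

/* After: a typed back-pointer, so callers write dev->adev directly and
 * the per-file cast helpers can be deleted. */
struct kfd_dev_after {
	struct amdgpu_device *adev;
};

Keeping the pointer typed lets the compiler catch mismatched handles that the old cast silently accepted.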
@@ -57,11 +57,6 @@
 		(*dump)[i++][1] = RREG32(addr);	\
 	} while (0)
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
 {
 	return (struct v9_sdma_mqd *)mqd;
...
@@ -39,11 +39,6 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
...
@@ -38,11 +38,6 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
...
@@ -82,11 +82,6 @@ union TCP_WATCH_CNTL_BITS {
 	float f32All;
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
...
@@ -39,11 +39,6 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
...
@@ -46,11 +46,6 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
...
@@ -1963,8 +1963,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
 		struct crat_subtype_iolink *sub_type_hdr,
 		uint32_t proximity_domain)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;
-
 	*avail_size -= sizeof(struct crat_subtype_iolink);
 	if (*avail_size < 0)
 		return -ENOMEM;
@@ -1981,7 +1979,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
 	/* Fill in IOLINK subtype.
 	 * TODO: Fill-in other fields of iolink subtype
 	 */
-	if (adev->gmc.xgmi.connected_to_cpu) {
+	if (kdev->adev->gmc.xgmi.connected_to_cpu) {
 		/*
 		 * with host gpu xgmi link, host can access gpu memory whether
 		 * or not pcie bar type is large, so always create bidirectional
@@ -1990,7 +1988,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
 		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
 		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
 		sub_type_hdr->num_hops_xgmi = 1;
-		if (adev->asic_type == CHIP_ALDEBARAN) {
+		if (kdev->adev->asic_type == CHIP_ALDEBARAN) {
 			sub_type_hdr->minimum_bandwidth_mbs =
 				amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
 						kdev->adev, NULL, true);
...
@@ -1031,7 +1031,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	kfd_cwsr_init(kfd);
 
-	svm_migrate_init((struct amdgpu_device *)kfd->kgd);
+	svm_migrate_init(kfd->adev);
 
 	if(kgd2kfd_resume_iommu(kfd))
 		goto device_iommu_error;
...
@@ -1132,7 +1132,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 
 		res.queue_mask |= 1ull
 			<< amdgpu_queue_mask_bit_to_set_resource_bit(
-				(struct amdgpu_device *)dqm->dev->kgd, i);
+				dqm->dev->adev, i);
 	}
 	res.gws_mask = ~0ull;
 	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
...
@@ -892,7 +892,7 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
-int kfd_process_gpuid_from_kgd(struct kfd_process *p,
+int kfd_process_gpuid_from_adev(struct kfd_process *p,
 			       struct amdgpu_device *adev, uint32_t *gpuid,
 			       uint32_t *gpuidx);
 static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
...
@@ -1779,14 +1779,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
 }
 
 int
-kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev,
+kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
 			   uint32_t *gpuid, uint32_t *gpuidx)
 {
-	struct kgd_dev *kgd = (struct kgd_dev *)adev;
 	int i;
 
 	for (i = 0; i < p->n_pdds; i++)
-		if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) {
+		if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
 			*gpuid = p->pdds[i]->dev->id;
 			*gpuidx = i;
 			return 0;
...
@@ -207,7 +207,6 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
 void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
 					     uint64_t throttle_bitmask)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
 	/*
 	 * ThermalThrottle msg = throttle_bitmask(8):
 	 *			 thermal_interrupt_count(16):
@@ -223,14 +222,13 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
 	len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
 		       KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
-		       atomic64_read(&adev->smu.throttle_int_counter));
+		       atomic64_read(&dev->adev->smu.throttle_int_counter));
 
 	add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
 }
 
 void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
 	struct amdgpu_task_info task_info;
 
 	/* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */
 	/* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n +
@@ -243,7 +241,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
 		return;
 
 	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
-	amdgpu_vm_get_task_info(adev, pasid, &task_info);
+	amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
 	/* Report VM faults from user applications, not retry from kernel */
 	if (!task_info.pid)
 		return;
...
@@ -193,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
 		struct kfd_process_device *pdd;
-		struct amdgpu_device *adev;
 
 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
@@ -201,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		r = svm_range_dma_map_dev(adev, prange, offset, npages,
+		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
 					  hmm_pfns, gpuidx);
 		if (r)
 			break;
@@ -581,7 +579,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
 		return NULL;
 	}
 
-	return (struct amdgpu_device *)pdd->dev->kgd;
+	return pdd->dev->adev;
 }
 
 struct kfd_process_device *
@@ -593,7 +591,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
 	p = container_of(prange->svms, struct kfd_process, svms);
 
-	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
 	if (r) {
 		pr_debug("failed to get device id by adev %p\n", adev);
 		return NULL;
@@ -1255,7 +1253,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 {
 	struct kfd_process_device *pdd;
 	struct amdgpu_device *bo_adev;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
 	struct dma_fence *fence = NULL;
 	uint32_t gpuidx;
@@ -1274,19 +1271,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
 		pdd = kfd_bind_process_to_device(pdd->dev, p);
 		if (IS_ERR(pdd))
 			return -EINVAL;
 
-		if (bo_adev && adev != bo_adev &&
-		    !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+		if (bo_adev && pdd->dev->adev != bo_adev &&
+		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
 			pr_debug("cannot map to device idx %d\n", gpuidx);
 			continue;
 		}
 
-		r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+		r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
 					 prange, offset, npages, readonly,
 					 prange->dma_addr[gpuidx],
 					 bo_adev, wait ? &fence : NULL);
@@ -1320,7 +1316,6 @@ struct svm_validate_context {
 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 {
 	struct kfd_process_device *pdd;
-	struct amdgpu_device *adev;
 	struct amdgpu_vm *vm;
 	uint32_t gpuidx;
 	int r;
@@ -1332,7 +1327,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 		vm = drm_priv_to_vm(pdd->drm_priv);
 
 		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
@@ -1354,9 +1348,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 			r = -EINVAL;
 			goto unreserve_out;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
+		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
+					      drm_priv_to_vm(pdd->drm_priv),
 					      svm_range_bo_validate, NULL);
 		if (r) {
 			pr_debug("failed %d validate pt bos\n", r);
@@ -1379,12 +1373,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
 {
 	struct kfd_process_device *pdd;
-	struct amdgpu_device *adev;
 
 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
-	adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-	return SVM_ADEV_PGMAP_OWNER(adev);
+	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
 }
 
 /*
@@ -1964,7 +1956,6 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 {
 	struct kfd_process_device *pdd;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
 	uint32_t i;
@@ -1976,9 +1967,9 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 			continue;
 
 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
+		amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev,
+						     &pdd->dev->adev->irq.ih1);
 
 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
 	}
@@ -2299,7 +2290,7 @@ svm_range_best_restore_location(struct svm_range *prange,
 	p = container_of(prange->svms, struct kfd_process, svms);
 
-	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
+	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
 	if (r < 0) {
 		pr_debug("failed to get gpuid from kgd\n");
 		return -1;
@@ -2476,7 +2467,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
 		return NULL;
 	}
-	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+	if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
 		pr_debug("failed to get gpuid from kgd\n");
 		svm_range_free(prange);
 		return NULL;
@@ -2543,7 +2534,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
 		uint32_t gpuid;
 		int r;
 
-		r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+		r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
 		if (r < 0)
 			return;
 	}
@@ -2951,7 +2942,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 	uint32_t best_loc = prange->prefetch_loc;
 	struct kfd_process_device *pdd;
 	struct amdgpu_device *bo_adev;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
 	uint32_t gpuidx;
@@ -2979,12 +2969,11 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
 			continue;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		if (adev == bo_adev)
+		if (pdd->dev->adev == bo_adev)
 			continue;
 
-		if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
+		if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
 			best_loc = 0;
 			break;
 		}
...
@@ -1286,7 +1286,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	void *crat_image = NULL;
 	size_t image_size = 0;
 	int proximity_domain;
-	struct amdgpu_device *adev;
 
 	INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1296,10 +1295,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
 
-	adev = (struct amdgpu_device *)(gpu->kgd);
-
 	/* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
-	if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) {
+	if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
 		struct kfd_topology_device *top_dev;
 
 		down_read(&topology_lock);
@@ -1477,16 +1474,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	/* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
 	dev->node_props.capability |=
-		((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+		((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
 		HSA_CAP_SRAM_EDCSUPPORTED : 0;
-	dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+	dev->node_props.capability |=
+		((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
 		HSA_CAP_MEM_EDCSUPPORTED : 0;
 
-	if (adev->asic_type != CHIP_VEGA10)
-		dev->node_props.capability |= (adev->ras_enabled != 0) ?
+	if (dev->gpu->adev->asic_type != CHIP_VEGA10)
+		dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
 			HSA_CAP_RASEVENTNOTIFY : 0;
 
-	if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev))
+	if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
 		dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
 
 	kfd_debug_print_topology();
...