Commit d69a3b76 authored by Mukul Joshi, committed by Alex Deucher

drm/amdkfd: Cleanup kfd_dev struct

Cleanup kfd_dev struct by removing ddev and pdev as both
drm_device and pci_dev can be fetched from amdgpu_device.
Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Tested-by: Amber Lin <Amber.Lin@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 087b8542
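
For orientation, the access pattern this patch converges on, as a minimal sketch (adev_to_drm() and adev->pdev are existing amdgpu accessors used throughout the hunks below, not something this patch introduces; kfd stands in for any struct kfd_dev pointer):

	/* Both handles remain reachable through the amdgpu_device,
	 * so kfd_dev no longer needs to cache its own copies.
	 */
	struct drm_device *ddev = adev_to_drm(kfd->adev); /* replaces kfd->ddev */
	struct pci_dev *pdev = kfd->adev->pdev;           /* replaces kfd->pdev */

With only kfd->adev stored, there is a single source of truth for the device handles and no cached pointers that could diverge from it.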
@@ -195,7 +195,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	}
 
 	adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
-						adev_to_drm(adev), &gpu_resources);
+						&gpu_resources);
 
 	amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
...
@@ -353,7 +353,6 @@ int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
 struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
@@ -381,7 +380,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 }
 
 static inline
-bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 const struct kgd2kfd_shared_resources *gpu_resources)
 {
 	return false;
...
@@ -2114,8 +2114,8 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
 	struct acpi_table_header *table_header = NULL;
 	struct acpi_subtable_header *sub_header = NULL;
 	unsigned long table_end, subtable_len;
-	u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
-			pci_dev_id(kdev->pdev);
+	u32 pci_id = pci_domain_nr(kdev->adev->pdev->bus) << 16 |
+			pci_dev_id(kdev->adev->pdev);
 	u32 bdf;
 	acpi_status status;
 	struct acpi_srat_cpu_affinity *cpu;
@@ -2190,7 +2190,7 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
 		numa_node = 0;
 
 	if (numa_node != NUMA_NO_NODE)
-		set_dev_node(&kdev->pdev->dev, numa_node);
+		set_dev_node(&kdev->adev->pdev->dev, numa_node);
 }
 #endif
@@ -2251,14 +2251,14 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
 	sub_type_hdr->proximity_domain_from = proximity_domain;
 #ifdef CONFIG_ACPI_NUMA
-	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
+	if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE)
 		kfd_find_numa_node_in_srat(kdev);
 #endif
 #ifdef CONFIG_NUMA
-	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
+	if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE)
 		sub_type_hdr->proximity_domain_to = 0;
 	else
-		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
+		sub_type_hdr->proximity_domain_to = kdev->adev->pdev->dev.numa_node;
 #else
 	sub_type_hdr->proximity_domain_to = 0;
 #endif
...
@@ -227,7 +227,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 {
 	struct kfd_dev *kfd = NULL;
 	const struct kfd2kgd_calls *f2g = NULL;
-	struct pci_dev *pdev = adev->pdev;
 	uint32_t gfx_target_version = 0;
 
 	switch (adev->asic_type) {
@@ -429,7 +428,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 	kfd->adev = adev;
 	kfd_device_info_init(kfd, vf, gfx_target_version);
-	kfd->pdev = pdev;
 	kfd->init_complete = false;
 	kfd->kfd2kgd = f2g;
 	atomic_set(&kfd->compute_profile, 0);
@@ -511,12 +509,10 @@ static void kfd_smi_init(struct kfd_dev *dev)
 }
 
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources)
 {
 	unsigned int size, map_process_packet_size;
 
-	kfd->ddev = ddev;
 	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 			KGD_ENGINE_MEC1);
 	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
@@ -541,7 +537,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	    kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
 		dev_info(kfd_device,
 			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
-			 kfd->pdev->vendor, kfd->pdev->device,
+			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
 			 kfd->mec_fw_version,
 			 kfd->device_info.no_atomic_fw_version);
 		return false;
@@ -650,8 +646,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd_smi_init(kfd);
 
 	kfd->init_complete = true;
-	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
-		 kfd->pdev->device);
+	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
+		 kfd->adev->pdev->device);
 
 	pr_debug("Starting kfd with the following scheduling policy %d\n",
 		kfd->dqm->sched_policy);
@@ -676,7 +672,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
 	dev_err(kfd_device,
 		"device %x:%x NOT added due to errors\n",
-		kfd->pdev->vendor, kfd->pdev->device);
+		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 out:
 	return kfd->init_complete;
 }
@@ -789,7 +785,7 @@ int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 	if (err)
 		dev_err(kfd_device,
 			"Failed to resume IOMMU for device %x:%x\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 	return err;
 }
@@ -801,7 +797,7 @@ static int kfd_resume(struct kfd_dev *kfd)
 	if (err)
 		dev_err(kfd_device,
 			"Error starting queue manager for device %x:%x\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 
 	return err;
 }
...
@@ -49,7 +49,7 @@ int kfd_iommu_check_device(struct kfd_dev *kfd)
 		return -ENODEV;
 
 	iommu_info.flags = 0;
-	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+	err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info);
 	if (err)
 		return err;
@@ -71,7 +71,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
 		return 0;
 
 	iommu_info.flags = 0;
-	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+	err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info);
 	if (err < 0) {
 		dev_err(kfd_device,
 			"error getting iommu info. is the iommu enabled?\n");
@@ -121,7 +121,7 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
 		return -EINVAL;
 	}
 
-	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
+	err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread);
 	if (!err)
 		pdd->bound = PDD_BOUND;
@@ -139,7 +139,8 @@ void kfd_iommu_unbind_process(struct kfd_process *p)
 	for (i = 0; i < p->n_pdds; i++)
 		if (p->pdds[i]->bound == PDD_BOUND)
-			amd_iommu_unbind_pasid(p->pdds[i]->dev->pdev, p->pasid);
+			amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev,
+					       p->pasid);
 }
 
 /* Callback for process shutdown invoked by the IOMMU driver */
@@ -222,7 +223,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
 			continue;
 		}
 
-		err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
+		err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid,
 					   p->lead_thread);
 		if (err < 0) {
 			pr_err("Unexpected pasid 0x%x binding failure\n",
@@ -282,9 +283,9 @@ void kfd_iommu_suspend(struct kfd_dev *kfd)
 	kfd_unbind_processes_from_device(kfd);
 
-	amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
-	amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
-	amd_iommu_free_device(kfd->pdev);
+	amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
+	amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
+	amd_iommu_free_device(kfd->adev->pdev);
 }
 
 /** kfd_iommu_resume - Restore IOMMU after resume
@@ -302,20 +303,20 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
 	pasid_limit = kfd_get_pasid_limit();
 
-	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+	err = amd_iommu_init_device(kfd->adev->pdev, pasid_limit);
 	if (err)
 		return -ENXIO;
 
-	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+	amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev,
 					iommu_pasid_shutdown_callback);
-	amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+	amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev,
 				     iommu_invalid_ppr_cb);
 
 	err = kfd_bind_processes_to_device(kfd);
 	if (err) {
-		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
-		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
-		amd_iommu_free_device(kfd->pdev);
+		amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
+		amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
+		amd_iommu_free_device(kfd->adev->pdev);
 		return err;
 	}
...
@@ -254,8 +254,6 @@ struct kfd_dev {
 	struct amdgpu_device *adev;
 
 	struct kfd_device_info device_info;
-	struct pci_dev *pdev;
-	struct drm_device *ddev;
 
 	unsigned int id;		/* topology stub index */
@@ -1365,7 +1363,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev);
 static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
 {
 #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
-	struct drm_device *ddev = kfd->ddev;
+	struct drm_device *ddev = adev_to_drm(kfd->adev);
 
 	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
 					  ddev->render->index,
...
@@ -1050,8 +1050,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
 		 * for auto suspend
 		 */
 		if (pdd->runtime_inuse) {
-			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
-			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
 			pdd->runtime_inuse = false;
 		}
@@ -1633,9 +1633,9 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
 	 * pdd is destroyed.
 	 */
 	if (!pdd->runtime_inuse) {
-		err = pm_runtime_get_sync(dev->ddev->dev);
+		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
 		if (err < 0) {
-			pm_runtime_put_autosuspend(dev->ddev->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
 			return ERR_PTR(err);
 		}
 	}
@@ -1655,8 +1655,8 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
 out:
 	/* balance runpm reference count and exit with error */
 	if (!pdd->runtime_inuse) {
-		pm_runtime_mark_last_busy(dev->ddev->dev);
-		pm_runtime_put_autosuspend(dev->ddev->dev);
+		pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
 	}
 	return ERR_PTR(err);
...
@@ -259,7 +259,7 @@ void svm_range_free_dma_mappings(struct svm_range *prange)
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			continue;
 		}
-		dev = &pdd->dev->pdev->dev;
+		dev = &pdd->dev->adev->pdev->dev;
 		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
 		kvfree(dma_addr);
 		prange->dma_addr[gpuidx] = NULL;
...
@@ -115,7 +115,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
 	down_read(&topology_lock);
 
 	list_for_each_entry(top_dev, &topology_device_list, list)
-		if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+		if (top_dev->gpu && top_dev->gpu->adev->pdev == pdev) {
 			device = top_dev->gpu;
 			break;
 		}
@@ -1169,13 +1169,12 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 	local_mem_size = gpu->local_mem_info.local_mem_size_private +
 			gpu->local_mem_info.local_mem_size_public;
-	buf[0] = gpu->pdev->devfn;
-	buf[1] = gpu->pdev->subsystem_vendor |
-		(gpu->pdev->subsystem_device << 16);
-	buf[2] = pci_domain_nr(gpu->pdev->bus);
-	buf[3] = gpu->pdev->device;
-	buf[4] = gpu->pdev->bus->number;
+	buf[0] = gpu->adev->pdev->devfn;
+	buf[1] = gpu->adev->pdev->subsystem_vendor |
+		(gpu->adev->pdev->subsystem_device << 16);
+	buf[2] = pci_domain_nr(gpu->adev->pdev->bus);
+	buf[3] = gpu->adev->pdev->device;
+	buf[4] = gpu->adev->pdev->bus->number;
 	buf[5] = lower_32_bits(local_mem_size);
 	buf[6] = upper_32_bits(local_mem_size);
@@ -1269,7 +1268,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
 	if (target_gpu_dev) {
 		uint32_t cap;
 
-		pcie_capability_read_dword(target_gpu_dev->gpu->pdev,
+		pcie_capability_read_dword(target_gpu_dev->gpu->adev->pdev,
 				PCI_EXP_DEVCAP2, &cap);
 
 		if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
@@ -1688,13 +1687,13 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 			cu_info.num_shader_arrays_per_engine;
 
 	dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version;
-	dev->node_props.vendor_id = gpu->pdev->vendor;
-	dev->node_props.device_id = gpu->pdev->device;
+	dev->node_props.vendor_id = gpu->adev->pdev->vendor;
+	dev->node_props.device_id = gpu->adev->pdev->device;
 	dev->node_props.capability |=
 		((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
 			HSA_CAP_ASIC_REVISION_MASK);
-	dev->node_props.location_id = pci_dev_id(gpu->pdev);
-	dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
+	dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
+	dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
 	dev->node_props.max_engine_clk_fcompute =
 		amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
 	dev->node_props.max_engine_clk_ccompute =
...