Commit 8eabaf54 authored by Kent Russell, committed by Oded Gabbay

drm/amdkfd: Clean up KFD style errors and warnings v2

Using checkpatch.pl -f <file> showed a number of style issues. This
patch addresses as many of them as possible. Some long lines have been
left for readability, but attempts to minimize them have been made.

v2: Broke long lines in gfx_v7 get_fw_version
Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 438e29a2
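
For reference, the report cited in the commit message comes from running checkpatch in per-file mode from the top of a kernel tree. A minimal sketch (the file argument is illustrative; any of the touched KFD sources works the same way):

    ./scripts/checkpatch.pl -f drivers/gpu/drm/amd/amdkfd/kfd_pasid.c

The -f/--file flag tells checkpatch to treat its argument as a source file rather than a patch.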
@@ -28,14 +28,14 @@
 #include <linux/module.h>
 const struct kgd2kfd_calls *kgd2kfd;
-bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
 int amdgpu_amdkfd_init(void)
 {
        int ret;
 #if defined(CONFIG_HSA_AMD_MODULE)
-       int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+       int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
        kgd2kfd_init_p = symbol_request(kgd2kfd_init);
...
@@ -566,42 +566,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
        switch (type) {
        case KGD_ENGINE_PFP:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.pfp_fw->data;
+                       adev->gfx.pfp_fw->data;
                break;
        case KGD_ENGINE_ME:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.me_fw->data;
+                       adev->gfx.me_fw->data;
                break;
        case KGD_ENGINE_CE:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.ce_fw->data;
+                       adev->gfx.ce_fw->data;
                break;
        case KGD_ENGINE_MEC1:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.mec_fw->data;
+                       adev->gfx.mec_fw->data;
                break;
        case KGD_ENGINE_MEC2:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.mec2_fw->data;
+                       adev->gfx.mec2_fw->data;
                break;
        case KGD_ENGINE_RLC:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.rlc_fw->data;
+                       adev->gfx.rlc_fw->data;
                break;
        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
-                                       adev->sdma.instance[0].fw->data;
+                       adev->sdma.instance[0].fw->data;
                break;
        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
-                                       adev->sdma.instance[1].fw->data;
+                       adev->sdma.instance[1].fw->data;
                break;
        default:
...
@@ -454,42 +454,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
        switch (type) {
        case KGD_ENGINE_PFP:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.pfp_fw->data;
+                       adev->gfx.pfp_fw->data;
                break;
        case KGD_ENGINE_ME:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.me_fw->data;
+                       adev->gfx.me_fw->data;
                break;
        case KGD_ENGINE_CE:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.ce_fw->data;
+                       adev->gfx.ce_fw->data;
                break;
        case KGD_ENGINE_MEC1:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.mec_fw->data;
+                       adev->gfx.mec_fw->data;
                break;
        case KGD_ENGINE_MEC2:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.mec2_fw->data;
+                       adev->gfx.mec2_fw->data;
                break;
        case KGD_ENGINE_RLC:
                hdr = (const union amdgpu_firmware_header *)
-                                               adev->gfx.rlc_fw->data;
+                       adev->gfx.rlc_fw->data;
                break;
        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
-                                       adev->sdma.instance[0].fw->data;
+                       adev->sdma.instance[0].fw->data;
                break;
        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
-                                       adev->sdma.instance[1].fw->data;
+                       adev->sdma.instance[1].fw->data;
                break;
        default:
...
@@ -782,7 +782,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        "scratch_limit %llX\n", pdd->scratch_limit);
                args->num_of_nodes++;
-       } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
+       } while ((pdd = kfd_get_next_process_device_data(p, pdd)) !=
+                       NULL &&
                (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
 }
@@ -848,7 +849,8 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
 }
 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
-       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+                           .cmd_drv = 0, .name = #ioctl}
 /** Ioctl table */
 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
...
@@ -313,7 +313,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
                return -EINVAL;
        }
-       for (i = 0 ; i < adw_info->num_watch_points ; i++) {
+       for (i = 0; i < adw_info->num_watch_points; i++) {
                dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
                                &cntl, i, pdd->qpd.vmid);
@@ -623,7 +623,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
                return status;
        }
-       /* we do not control the VMID in DIQ,so reset it to a known value */
+       /* we do not control the VMID in DIQ, so reset it to a known value */
        reg_sq_cmd.bits.vm_id = 0;
        pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
@@ -810,7 +810,8 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
        /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
         * ATC_VMID15_PASID_MAPPING
-        * to check which VMID the current process is mapped to. */
+        * to check which VMID the current process is mapped to.
+        */
        for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
                if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
...
@@ -30,13 +30,11 @@
 #pragma pack(push, 4)
 enum HSA_DBG_WAVEOP {
        HSA_DBG_WAVEOP_HALT = 1,   /* Halts a wavefront */
        HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
        HSA_DBG_WAVEOP_KILL = 3,   /* Kills a wavefront */
-       HSA_DBG_WAVEOP_DEBUG = 4,  /* Causes wavefront to enter
-                                       debug mode */
-       HSA_DBG_WAVEOP_TRAP = 5,   /* Causes wavefront to take
-                                       a trap */
+       HSA_DBG_WAVEOP_DEBUG = 4,  /* Causes wavefront to enter dbg mode */
+       HSA_DBG_WAVEOP_TRAP = 5,   /* Causes wavefront to take a trap */
        HSA_DBG_NUM_WAVEOP = 5,
        HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
 };
@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 {
                        uint32_t UserData:8;    /* user data */
                        uint32_t ShaderArray:1; /* Shader array */
                        uint32_t Priv:1;        /* Privileged */
-                       uint32_t Reserved0:4;   /* This field is reserved,
-                                                  should be 0 */
+                       uint32_t Reserved0:4;   /* Reserved, should be 0 */
                        uint32_t WaveId:4;      /* wave id */
                        uint32_t SIMD:2;        /* SIMD id */
                        uint32_t HSACU:4;       /* Compute unit */
                        uint32_t ShaderEngine:2;/* Shader engine */
                        uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
-                       uint32_t Reserved1:4;   /* This field is reserved,
-                                                  should be 0 */
+                       uint32_t Reserved1:4;   /* Reserved, should be 0 */
                } ui32;
                uint32_t Value;
        };
@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage {
  * in the user mode instruction stream. The OS scheduler event is typically
  * associated and signaled by an interrupt issued by the GPU, but other HSA
  * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
- * by the KFD by this mechanism, too. */
+ * by the KFD by this mechanism, too.
+ */
 /* these are the new definitions for events */
 enum HSA_EVENTTYPE {
        HSA_EVENTTYPE_SIGNAL = 0,       /* user-mode generated GPU signal */
        HSA_EVENTTYPE_NODECHANGE = 1,   /* HSA node change (attach/detach) */
        HSA_EVENTTYPE_DEVICESTATECHANGE = 2,    /* HSA device state change
-                                               (start/stop) */
+                                                * (start/stop)
+                                                */
        HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
        HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
        HSA_EVENTTYPE_DEBUG_EVENT = 5,  /* GPU signal for debugging */
        HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
        HSA_EVENTTYPE_QUEUE_EVENT = 7,  /* GPU signal queue idle state
-                                       (EOP pm4) */
+                                        * (EOP pm4)
+                                        */
        /* ... */
        HSA_EVENTTYPE_MAXID,
        HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
...
@@ -155,12 +155,13 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
                dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
                        (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
                        (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
-                       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+                       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+                                                                       != 0);
                return false;
        }
        pasid_limit = min_t(unsigned int,
-               (unsigned int)1 << kfd->device_info->max_pasid_bits,
+               (unsigned int)(1 << kfd->device_info->max_pasid_bits),
                iommu_info.max_pasids);
        /*
         * last pasid is used for kernel queues doorbells
...
@@ -216,7 +216,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
        set = false;
-       for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+       for (pipe = dqm->next_pipe_to_allocate, i = 0;
+                       i < get_pipes_per_mec(dqm);
                        pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
                if (!is_pipe_enabled(dqm, 0, pipe))
@@ -669,7 +670,8 @@ static int set_sched_resources(struct device_queue_manager *dqm)
                /* This situation may be hit in the future if a new HW
                 * generation exposes more than 64 queues. If so, the
-                * definition of res.queue_mask needs updating */
+                * definition of res.queue_mask needs updating
+                */
                if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
                        pr_err("Invalid queue enabled by amdgpu: %d\n", i);
                        break;
@@ -890,7 +892,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        }
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-               dqm->sdma_queue_count++;
+               dqm->sdma_queue_count++;
        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
...
@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page,
        page->free_slots++;
        /* We don't free signal pages, they are retained by the process
-        * and reused until it exits. */
+        * and reused until it exits.
+        */
 }
 static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
@@ -584,7 +585,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
         * search faster.
         */
        struct signal_page *page;
-       unsigned i;
+       unsigned int i;
        list_for_each_entry(page, &p->signal_event_pages, event_pages)
                for (i = 0; i < SLOTS_PER_PAGE; i++)
...
@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work)
 bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
 {
        /* integer and bitwise OR so there is no boolean short-circuiting */
-       unsigned wanted = 0;
+       unsigned int wanted = 0;
        wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
                                                        ih_ring_entry);
...
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm,
 static int amdkfd_init_completed;
-int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+int kgd2kfd_init(unsigned int interface_version,
+               const struct kgd2kfd_calls **g2f)
 {
        if (!amdkfd_init_completed)
                return -EPROBE_DEFER;
...
@@ -193,9 +193,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
        m->cp_hqd_vmid = q->vmid;
-       if (q->format == KFD_QUEUE_FORMAT_AQL) {
+       if (q->format == KFD_QUEUE_FORMAT_AQL)
                m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
-       }
        m->cp_hqd_active = 0;
        q->is_active = false;
...
@@ -458,7 +458,7 @@ int pm_send_set_resources(struct packet_manager *pm,
        mutex_lock(&pm->lock);
        pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
-                                       sizeof(*packet) / sizeof(uint32_t),
-                                       (unsigned int **)&packet);
+                       sizeof(*packet) / sizeof(uint32_t),
+                       (unsigned int **)&packet);
        if (packet == NULL) {
                mutex_unlock(&pm->lock);
                pr_err("kfd: failed to allocate buffer on kernel queue\n");
@@ -530,8 +530,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
-       if (pm->allocated)
-               pm_release_ib(pm);
+       pm_release_ib(pm);
        return retval;
 }
...
@@ -32,7 +32,8 @@ int kfd_pasid_init(void)
 {
        pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
-       pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+       pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
+                               GFP_KERNEL);
        if (!pasid_bitmap)
                return -ENOMEM;
...
@@ -28,14 +28,14 @@
 #define PM4_MES_HEADER_DEFINED
 union PM4_MES_TYPE_3_HEADER {
        struct {
-               uint32_t reserved1:8;   /* < reserved */
-               uint32_t opcode:8;      /* < IT opcode */
-               uint32_t count:14;      /* < number of DWORDs - 1
-                                        * in the information body.
-                                        */
-               uint32_t type:2;        /* < packet identifier.
-                                        * It should be 3 for type 3 packets
-                                        */
+               /* reserved */
+               uint32_t reserved1:8;
+               /* IT opcode */
+               uint32_t opcode:8;
+               /* number of DWORDs - 1 in the information body */
+               uint32_t count:14;
+               /* packet identifier. It should be 3 for type 3 packets */
+               uint32_t type:2;
        };
        uint32_t u32all;
 };
...
@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER {
        struct {
                uint32_t reserved1 : 8; /* < reserved */
                uint32_t opcode : 8;    /* < IT opcode */
-               uint32_t count : 14;/* < number of DWORDs - 1 in the
-                                       information body. */
-               uint32_t type : 2;  /* < packet identifier.
-                                       It should be 3 for type 3 packets */
+               uint32_t count : 14;/* < Number of DWORDS - 1 in the
+                                    * information body
+                                    */
+               uint32_t type : 2;  /* < packet identifier
+                                    * It should be 3 for type 3 packets
+                                    */
        };
        uint32_t u32All;
 };
...
@@ -294,13 +294,13 @@ enum kfd_queue_format {
  * @write_ptr: Defines the number of dwords written to the ring buffer.
  *
  * @doorbell_ptr: This field aim is to notify the H/W of new packet written to
- * the queue ring buffer. This field should be similar to write_ptr and the user
- * should update this field after he updated the write_ptr.
+ * the queue ring buffer. This field should be similar to write_ptr and the
+ * user should update this field after he updated the write_ptr.
  *
  * @doorbell_off: The doorbell offset in the doorbell pci-bar.
  *
- * @is_interop: Defines if this is a interop queue. Interop queue means that the
- * queue can access both graphics and compute resources.
+ * @is_interop: Defines if this is a interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
  *
  * @is_active: Defines if the queue is active or not.
  *
@@ -352,9 +352,10 @@ struct queue_properties {
  * @properties: The queue properties.
  *
  * @mec: Used only in no cp scheduling mode and identifies to micro engine id
  * that the queue should be execute on.
  *
- * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
+ * id.
  *
  * @queue: Used only in no cp scheduliong mode and identifies the queue's slot.
  *
@@ -520,8 +521,8 @@ struct kfd_process {
        struct mutex event_mutex;
        /* All events in process hashed by ID, linked on kfd_event.events. */
        DECLARE_HASHTABLE(events, 4);
-       struct list_head signal_event_pages;    /* struct slot_page_header.
-                                                  event_pages */
+       /* struct slot_page_header.event_pages */
+       struct list_head signal_event_pages;
        u32 next_nonsignal_event_id;
        size_t signal_event_count;
 };
@@ -559,8 +560,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p);
 /* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_first_process_device_data(
+                               struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(
+                               struct kfd_process *p,
                                struct kfd_process_device *pdd);
 bool kfd_has_process_device_data(struct kfd_process *p);
...
@@ -449,14 +449,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
        mutex_unlock(&p->mutex);
 }
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+struct kfd_process_device *kfd_get_first_process_device_data(
+                                               struct kfd_process *p)
 {
        return list_first_entry(&p->per_device_data,
                                struct kfd_process_device,
                                per_device_list);
 }
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_next_process_device_data(
+                                               struct kfd_process *p,
                                                struct kfd_process_device *pdd)
 {
        if (list_is_last(&pdd->per_device_list, &p->per_device_data))
...
@@ -1170,8 +1170,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
         * GPU vBIOS
         */
-       /*
-        * Update the SYSFS tree, since we added another topology device
+       /* Update the SYSFS tree, since we added another topology
+        * device
         */
        if (kfd_topology_update_sysfs() < 0)
                kfd_topology_release_sysfs();
...