Commit 8eabaf54 authored by Kent Russell, committed by Oded Gabbay

drm/amdkfd: Clean up KFD style errors and warnings v2

Using checkpatch.pl -f <file> showed a number of style issues. This
patch addresses as many of them as possible. Some long lines have been
left for readability, but attempts to minimize them have been made.

v2: Broke long lines in gfx_v7 get_fw_version
Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 438e29a2
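
For reference, checkpatch ships with the kernel tree and is run against a whole source file (rather than a patch) as scripts/checkpatch.pl -f <file>, as the message above describes. The following is a minimal, hypothetical C fragment, not taken from the KFD sources, sketching two of the conventions this patch enforces: spelling out "unsigned int" instead of bare "unsigned", and the multi-line comment style with the closing marker on its own line.

/* A multi-line comment in kernel style continues with aligned
 * asterisks and puts the closing marker on its own line, which is
 * the comment shape the hunks below convert to.
 */
static unsigned int count_set_bits(unsigned int mask)
{
	unsigned int i, n = 0;	/* bare "unsigned i;" would be flagged */

	for (i = 0; i < 32; i++)	/* no space before the semicolons */
		if (mask & (1u << i))
			n++;

	return n;
}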
@@ -28,14 +28,14 @@
 #include <linux/module.h>

 const struct kgd2kfd_calls *kgd2kfd;
-bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

 int amdgpu_amdkfd_init(void)
 {
	int ret;

 #if defined(CONFIG_HSA_AMD_MODULE)
-	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
@@ -782,7 +782,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 			"scratch_limit %llX\n", pdd->scratch_limit);

 		args->num_of_nodes++;
-	} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
+	} while ((pdd = kfd_get_next_process_device_data(p, pdd)) !=
+		NULL &&
 		(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
 }
@@ -848,7 +849,8 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
 }

 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
-	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+			    .cmd_drv = 0, .name = #ioctl}

 /** Ioctl table */
 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
@@ -313,7 +313,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 		return -EINVAL;
 	}

-	for (i = 0 ; i < adw_info->num_watch_points ; i++) {
+	for (i = 0; i < adw_info->num_watch_points; i++) {
 		dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
 						&cntl, i, pdd->qpd.vmid);
@@ -623,7 +623,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 		return status;
 	}

-	/* we do not control the VMID in DIQ,so reset it to a known value */
+	/* we do not control the VMID in DIQ, so reset it to a known value */
 	reg_sq_cmd.bits.vm_id = 0;

 	pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
@@ -810,7 +810,8 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
 	/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
 	 * ATC_VMID15_PASID_MAPPING
-	 * to check which VMID the current process is mapped to. */
+	 * to check which VMID the current process is mapped to.
+	 */

 	for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
 		if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
@@ -33,10 +33,8 @@ enum HSA_DBG_WAVEOP {
 	HSA_DBG_WAVEOP_HALT = 1,   /* Halts a wavefront */
 	HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
 	HSA_DBG_WAVEOP_KILL = 3,   /* Kills a wavefront */
-	HSA_DBG_WAVEOP_DEBUG = 4,  /* Causes wavefront to enter
-				      debug mode */
-	HSA_DBG_WAVEOP_TRAP = 5,   /* Causes wavefront to take
-				      a trap */
+	HSA_DBG_WAVEOP_DEBUG = 4,  /* Causes wavefront to enter dbg mode */
+	HSA_DBG_WAVEOP_TRAP = 5,   /* Causes wavefront to take a trap */
 	HSA_DBG_NUM_WAVEOP = 5,
 	HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
 };
@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 {
 		uint32_t UserData:8;	/* user data */
 		uint32_t ShaderArray:1;	/* Shader array */
 		uint32_t Priv:1;	/* Privileged */
-		uint32_t Reserved0:4;	/* This field is reserved,
-					   should be 0 */
+		uint32_t Reserved0:4;	/* Reserved, should be 0 */
 		uint32_t WaveId:4;	/* wave id */
 		uint32_t SIMD:2;	/* SIMD id */
 		uint32_t HSACU:4;	/* Compute unit */
 		uint32_t ShaderEngine:2;/* Shader engine */
 		uint32_t MessageType:2;	/* see HSA_DBG_WAVEMSG_TYPE */
-		uint32_t Reserved1:4;	/* This field is reserved,
-					   should be 0 */
+		uint32_t Reserved1:4;	/* Reserved, should be 0 */
 	} ui32;
 	uint32_t Value;
 };
@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage {
  * in the user mode instruction stream. The OS scheduler event is typically
  * associated and signaled by an interrupt issued by the GPU, but other HSA
  * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
- * by the KFD by this mechanism, too. */
+ * by the KFD by this mechanism, too.
+ */

 /* these are the new definitions for events */
 enum HSA_EVENTTYPE {
 	HSA_EVENTTYPE_SIGNAL = 0,	/* user-mode generated GPU signal */
 	HSA_EVENTTYPE_NODECHANGE = 1,	/* HSA node change (attach/detach) */
 	HSA_EVENTTYPE_DEVICESTATECHANGE = 2,	/* HSA device state change
-						   (start/stop) */
+						 * (start/stop)
+						 */
 	HSA_EVENTTYPE_HW_EXCEPTION = 3,	/* GPU shader exception event */
 	HSA_EVENTTYPE_SYSTEM_EVENT = 4,	/* GPU SYSCALL with parameter info */
 	HSA_EVENTTYPE_DEBUG_EVENT = 5,	/* GPU signal for debugging */
 	HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
 	HSA_EVENTTYPE_QUEUE_EVENT = 7,	/* GPU signal queue idle state
-					   (EOP pm4) */
+					 * (EOP pm4)
+					 */
 	/* ... */
 	HSA_EVENTTYPE_MAXID,
 	HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
@@ -155,12 +155,13 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
 			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
 			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
-			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+									!= 0);
 		return false;
 	}

 	pasid_limit = min_t(unsigned int,
-		(unsigned int)1 << kfd->device_info->max_pasid_bits,
+		(unsigned int)(1 << kfd->device_info->max_pasid_bits),
 		iommu_info.max_pasids);
 	/*
 	 * last pasid is used for kernel queues doorbells
@@ -216,7 +216,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 	set = false;

-	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+	for (pipe = dqm->next_pipe_to_allocate, i = 0;
+			i < get_pipes_per_mec(dqm);
 			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
 		if (!is_pipe_enabled(dqm, 0, pipe))
@@ -669,7 +670,8 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 		/* This situation may be hit in the future if a new HW
 		 * generation exposes more than 64 queues. If so, the
-		 * definition of res.queue_mask needs updating */
+		 * definition of res.queue_mask needs updating
+		 */
 		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
 			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
 			break;
@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page,
 	page->free_slots++;

 	/* We don't free signal pages, they are retained by the process
-	 * and reused until it exits. */
+	 * and reused until it exits.
+	 */
 }

 static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
@@ -584,7 +585,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 	 * search faster.
 	 */
 	struct signal_page *page;
-	unsigned i;
+	unsigned int i;

 	list_for_each_entry(page, &p->signal_event_pages, event_pages)
 		for (i = 0; i < SLOTS_PER_PAGE; i++)
@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work)
 bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
 {
 	/* integer and bitwise OR so there is no boolean short-circuiting */
-	unsigned wanted = 0;
+	unsigned int wanted = 0;

 	wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
 							ih_ring_entry);
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm,
 static int amdkfd_init_completed;

-int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+int kgd2kfd_init(unsigned int interface_version,
+		const struct kgd2kfd_calls **g2f)
 {
 	if (!amdkfd_init_completed)
 		return -EPROBE_DEFER;
@@ -193,9 +193,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
 	m->cp_hqd_vmid = q->vmid;

-	if (q->format == KFD_QUEUE_FORMAT_AQL) {
+	if (q->format == KFD_QUEUE_FORMAT_AQL)
 		m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
-	}

 	m->cp_hqd_active = 0;
 	q->is_active = false;
@@ -530,7 +530,6 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
-	if (pm->allocated)
 		pm_release_ib(pm);
 	return retval;
 }
@@ -32,7 +32,8 @@ int kfd_pasid_init(void)
 {
 	pasid_limit = KFD_MAX_NUM_OF_PROCESSES;

-	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
+				GFP_KERNEL);
 	if (!pasid_bitmap)
 		return -ENOMEM;
@@ -28,14 +28,14 @@
 #define PM4_MES_HEADER_DEFINED
 union PM4_MES_TYPE_3_HEADER {
 	struct {
-		uint32_t reserved1:8;	/* < reserved */
-		uint32_t opcode:8;	/* < IT opcode */
-		uint32_t count:14;	/* < number of DWORDs - 1
-					 * in the information body.
-					 */
-		uint32_t type:2;	/* < packet identifier.
-					 * It should be 3 for type 3 packets
-					 */
+		/* reserved */
+		uint32_t reserved1:8;
+		/* IT opcode */
+		uint32_t opcode:8;
+		/* number of DWORDs - 1 in the information body */
+		uint32_t count:14;
+		/* packet identifier. It should be 3 for type 3 packets */
+		uint32_t type:2;
 	};
 	uint32_t u32all;
 };
@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER {
 	struct {
 		uint32_t reserved1 : 8; /* < reserved */
 		uint32_t opcode : 8;	/* < IT opcode */
-		uint32_t count : 14;	/* < number of DWORDs - 1 in the
-					   information body. */
-		uint32_t type : 2;	/* < packet identifier.
-					   It should be 3 for type 3 packets */
+		uint32_t count : 14;	/* < Number of DWORDS - 1 in the
+					 * information body
+					 */
+		uint32_t type : 2;	/* < packet identifier
+					 * It should be 3 for type 3 packets
+					 */
 	};
 	uint32_t u32All;
 };
@@ -294,13 +294,13 @@ enum kfd_queue_format {
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: This field aim is to notify the H/W of new packet written to
- * the queue ring buffer. This field should be similar to write_ptr and the user
- * should update this field after he updated the write_ptr.
+ * the queue ring buffer. This field should be similar to write_ptr and the
+ * user should update this field after he updated the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
- * @is_interop: Defines if this is a interop queue. Interop queue means that the
- * queue can access both graphics and compute resources.
+ * @is_interop: Defines if this is a interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
@@ -354,7 +354,8 @@ struct queue_properties {
 * @mec: Used only in no cp scheduling mode and identifies to micro engine id
 * that the queue should be execute on.
 *
- * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
+ * id.
 *
 * @queue: Used only in no cp scheduliong mode and identifies the queue's slot.
 *
@@ -520,8 +521,8 @@ struct kfd_process {
 	struct mutex event_mutex;
 	/* All events in process hashed by ID, linked on kfd_event.events. */
 	DECLARE_HASHTABLE(events, 4);
-	struct list_head signal_event_pages;	/* struct slot_page_header.
-						   event_pages */
+	/* struct slot_page_header.event_pages */
+	struct list_head signal_event_pages;
 	u32 next_nonsignal_event_id;
 	size_t signal_event_count;
 };
@@ -559,8 +560,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 							struct kfd_process *p);

 /* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_first_process_device_data(
+							struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(
+							struct kfd_process *p,
 						struct kfd_process_device *pdd);
 bool kfd_has_process_device_data(struct kfd_process *p);
@@ -449,14 +449,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 	mutex_unlock(&p->mutex);
 }

-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+struct kfd_process_device *kfd_get_first_process_device_data(
+						struct kfd_process *p)
 {
 	return list_first_entry(&p->per_device_data,
 				struct kfd_process_device,
 				per_device_list);
 }

-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_next_process_device_data(
+						struct kfd_process *p,
 						struct kfd_process_device *pdd)
 {
 	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
@@ -1170,8 +1170,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 * GPU vBIOS
 	 */

-	/*
-	 * Update the SYSFS tree, since we added another topology device
+	/* Update the SYSFS tree, since we added another topology
+	 * device
 	 */
 	if (kfd_topology_update_sysfs() < 0)
 		kfd_topology_release_sysfs();