Commit fbeb661b authored by Yair Shachar, committed by Oded Gabbay

drm/amdkfd: Add skeleton H/W debugger module support

This patch adds skeleton H/W debugger module support. This code
enables registration and unregistration of a single HSA process at a
time.

The module saves the process's pasid and uses it to verify that only the
registered process is allowed to execute debugger operations through the
kernel driver.

v2: rename get_dbgmgr_mutex to kfd_get_dbgmgr_mutex to namespace it
Signed-off-by: Yair Shachar <yair.shachar@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 992839ad
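For orientation, here is a minimal sketch of how a caller (for example a
later debugger ioctl handler, which is not part of this patch) would be
expected to use the new interface: take the global lock returned by
kfd_get_dbgmgr_mutex(), lazily create the per-device manager with
kfd_dbgmgr_create(), and register the calling process so that its pasid
gates all further debugger operations. The wrapper name
example_dbg_register() and its error handling are illustrative
assumptions, not code from this commit.

/*
 * Hypothetical caller of the new debug manager API (illustration only,
 * not part of this patch).
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

static long example_dbg_register(struct kfd_dev *dev, struct kfd_process *p)
{
	long status = 0;

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr == NULL) {
		/* first registration on this device: create the manager */
		if (!kfd_dbgmgr_create(&dev->dbgmgr, dev)) {
			status = -ENOMEM;
			goto out;
		}
	}

	/* returns -EBUSY if another pasid is already registered */
	status = kfd_dbgmgr_register(dev->dbgmgr, p);

out:
	mutex_unlock(kfd_get_dbgmgr_mutex());
	return status;
}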
drivers/gpu/drm/amd/amdkfd/Makefile
@@ -12,6 +12,7 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 	kfd_kernel_queue_vi.o kfd_packet_manager.o \
 	kfd_process_queue_manager.o kfd_device_queue_manager.o \
 	kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
-	kfd_interrupt.o kfd_events.o cik_event_interrupt.o
+	kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
+	kfd_dbgdev.o kfd_dbgmgr.o

 obj-$(CONFIG_HSA_AMD)	+= amdkfd.o
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c (new file)
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_diq.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_opcodes.h"
#include "cik_regs.h"
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
#include "kfd_device_queue_manager.h"
#include "../../radeon/cik_reg.h"
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
BUG_ON(!dev || !dev->kfd2kgd);
dev->kfd2kgd->address_watch_disable(dev->kgd);
}
static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
{
BUG_ON(!dbgdev);
/*
* no action is needed in this case,
* just make sure diq will not be used
*/
dbgdev->kq = NULL;
return 0;
}
static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
{
struct queue_properties properties;
unsigned int qid;
struct kernel_queue *kq = NULL;
int status;
BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->dev);
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
&properties, 0, KFD_QUEUE_TYPE_DIQ,
&qid);
if (status) {
pr_err("amdkfd: Failed to create DIQ\n");
return status;
}
pr_debug("DIQ Created with queue id: %d\n", qid);
kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
if (kq == NULL) {
pr_err("amdkfd: Error getting DIQ\n");
pqm_destroy_queue(dbgdev->pqm, qid);
return -EFAULT;
}
dbgdev->kq = kq;
return status;
}
static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev)
{
BUG_ON(!dbgdev || !dbgdev->dev);
/* disable watch address */
dbgdev_address_watch_disable_nodiq(dbgdev->dev);
return 0;
}
static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
{
/* todo - disable address watch */
int status;
BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->kq);
status = pqm_destroy_queue(dbgdev->pqm,
dbgdev->kq->queue->properties.queue_id);
dbgdev->kq = NULL;
return status;
}
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type)
{
BUG_ON(!pdbgdev || !pdev);
pdbgdev->dev = pdev;
pdbgdev->kq = NULL;
pdbgdev->type = type;
pdbgdev->pqm = NULL;
switch (type) {
case DBGDEV_TYPE_NODIQ:
pdbgdev->dbgdev_register = dbgdev_register_nodiq;
pdbgdev->dbgdev_unregister = dbgdev_unregister_nodiq;
break;
case DBGDEV_TYPE_DIQ:
default:
pdbgdev->dbgdev_register = dbgdev_register_diq;
pdbgdev->dbgdev_unregister = dbgdev_unregister_diq;
break;
}
}
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h (new file)
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef KFD_DBGDEV_H_
#define KFD_DBGDEV_H_
enum {
SQ_CMD_VMID_OFFSET = 28,
ADDRESS_WATCH_CNTL_OFFSET = 24
};
enum {
PRIV_QUEUE_SYNC_TIME_MS = 200
};
/* CONTEXT reg space definition */
enum {
CONTEXT_REG_BASE = 0xA000,
CONTEXT_REG_END = 0xA400,
CONTEXT_REG_SIZE = CONTEXT_REG_END - CONTEXT_REG_BASE
};
/* USER CONFIG reg space definition */
enum {
USERCONFIG_REG_BASE = 0xC000,
USERCONFIG_REG_END = 0x10000,
USERCONFIG_REG_SIZE = USERCONFIG_REG_END - USERCONFIG_REG_BASE
};
/* CONFIG reg space definition */
enum {
CONFIG_REG_BASE = 0x2000, /* in dwords */
CONFIG_REG_END = 0x2B00,
CONFIG_REG_SIZE = CONFIG_REG_END - CONFIG_REG_BASE
};
/* SH reg space definition */
enum {
SH_REG_BASE = 0x2C00,
SH_REG_END = 0x3000,
SH_REG_SIZE = SH_REG_END - SH_REG_BASE
};
enum SQ_IND_CMD_CMD {
SQ_IND_CMD_CMD_NULL = 0x00000000,
SQ_IND_CMD_CMD_HALT = 0x00000001,
SQ_IND_CMD_CMD_RESUME = 0x00000002,
SQ_IND_CMD_CMD_KILL = 0x00000003,
SQ_IND_CMD_CMD_DEBUG = 0x00000004,
SQ_IND_CMD_CMD_TRAP = 0x00000005,
};
enum SQ_IND_CMD_MODE {
SQ_IND_CMD_MODE_SINGLE = 0x00000000,
SQ_IND_CMD_MODE_BROADCAST = 0x00000001,
SQ_IND_CMD_MODE_BROADCAST_QUEUE = 0x00000002,
SQ_IND_CMD_MODE_BROADCAST_PIPE = 0x00000003,
SQ_IND_CMD_MODE_BROADCAST_ME = 0x00000004,
};
union SQ_IND_INDEX_BITS {
struct {
uint32_t wave_id:4;
uint32_t simd_id:2;
uint32_t thread_id:6;
uint32_t:1;
uint32_t force_read:1;
uint32_t read_timeout:1;
uint32_t unindexed:1;
uint32_t index:16;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
union SQ_IND_CMD_BITS {
struct {
uint32_t data:32;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
union SQ_CMD_BITS {
struct {
uint32_t cmd:3;
uint32_t:1;
uint32_t mode:3;
uint32_t check_vmid:1;
uint32_t trap_id:3;
uint32_t:5;
uint32_t wave_id:4;
uint32_t simd_id:2;
uint32_t:2;
uint32_t queue_id:3;
uint32_t:1;
uint32_t vm_id:4;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
union SQ_IND_DATA_BITS {
struct {
uint32_t data:32;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
union GRBM_GFX_INDEX_BITS {
struct {
uint32_t instance_index:8;
uint32_t sh_index:8;
uint32_t se_index:8;
uint32_t:5;
uint32_t sh_broadcast_writes:1;
uint32_t instance_broadcast_writes:1;
uint32_t se_broadcast_writes:1;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
union TCP_WATCH_ADDR_H_BITS {
struct {
uint32_t addr:16;
uint32_t:16;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
union TCP_WATCH_ADDR_L_BITS {
struct {
uint32_t:6;
uint32_t addr:26;
} bitfields, bits;
uint32_t u32All;
signed int i32All;
float f32All;
};
enum {
QUEUESTATE__INVALID = 0, /* so by default we'll get invalid state */
QUEUESTATE__ACTIVE_COMPLETION_PENDING,
QUEUESTATE__ACTIVE
};
union ULARGE_INTEGER {
struct {
uint32_t low_part;
uint32_t high_part;
} u;
unsigned long long quad_part;
};
#define KFD_CIK_VMID_START_OFFSET (8)
#define KFD_CIK_VMID_END_OFFSET (KFD_CIK_VMID_START_OFFSET + (8))
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type);
#endif /* KFD_DBGDEV_H_ */
drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c (new file)
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "kfd_priv.h"
#include "cik_regs.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_diq.h"
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
static DEFINE_MUTEX(kfd_dbgmgr_mutex);
struct mutex *kfd_get_dbgmgr_mutex(void)
{
return &kfd_dbgmgr_mutex;
}
static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
{
BUG_ON(!pmgr);
kfree(pmgr->dbgdev);
pmgr->dbgdev = NULL;
pmgr->pasid = 0;
pmgr->dev = NULL;
}
void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
{
if (pmgr != NULL) {
kfd_dbgmgr_uninitialize(pmgr);
kfree(pmgr);
}
}
bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
{
enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
struct kfd_dbgmgr *new_buff;
BUG_ON(pdev == NULL);
BUG_ON(!pdev->init_complete);
new_buff = kfd_alloc_struct(new_buff);
if (!new_buff) {
pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
return false;
}
new_buff->pasid = 0;
new_buff->dev = pdev;
new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
if (!new_buff->dbgdev) {
pr_err("amdkfd: Failed to allocate dbgdev instance\n");
kfree(new_buff);
return false;
}
/* get actual type of DBGDevice: cpsch (DIQ) or not */
if (sched_policy == KFD_SCHED_POLICY_NO_HWS)
type = DBGDEV_TYPE_NODIQ;
kfd_dbgdev_init(new_buff->dbgdev, pdev, type);
*ppmgr = new_buff;
return true;
}
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
BUG_ON(!p || !pmgr || !pmgr->dbgdev);
if (pmgr->pasid != 0) {
pr_debug("H/W debugger is already active using pasid %d\n",
pmgr->pasid);
return -EBUSY;
}
/* remember pasid */
pmgr->pasid = p->pasid;
/* provide the pqm for diq generation */
pmgr->dbgdev->pqm = &p->pqm;
/* activate the actual registering */
pmgr->dbgdev->dbgdev_register(pmgr->dbgdev);
return 0;
}
long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
BUG_ON(!p || !pmgr || !pmgr->dbgdev);
/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
pr_debug("H/W debugger is not registered by calling pasid %d\n",
p->pasid);
return -EINVAL;
}
pmgr->dbgdev->dbgdev_unregister(pmgr->dbgdev);
pmgr->pasid = 0;
return 0;
}
drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h (new file)
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef KFD_DBGMGR_H_
#define KFD_DBGMGR_H_
#include "kfd_priv.h"
/* must align with hsakmttypes definition */
#pragma pack(push, 4)
enum HSA_DBG_WAVEOP {
HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter
debug mode */
HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take
a trap */
HSA_DBG_NUM_WAVEOP = 5,
HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
};
enum HSA_DBG_WAVEMODE {
/* send command to a single wave */
HSA_DBG_WAVEMODE_SINGLE = 0,
/*
* Broadcast to all wavefronts of all processes is not
* supported for HSA user mode
*/
/* send to waves within current process */
HSA_DBG_WAVEMODE_BROADCAST_PROCESS = 2,
/* send to waves within current process on CU */
HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU = 3,
HSA_DBG_NUM_WAVEMODE = 3,
HSA_DBG_MAX_WAVEMODE = 0xFFFFFFFF
};
enum HSA_DBG_WAVEMSG_TYPE {
HSA_DBG_WAVEMSG_AUTO = 0,
HSA_DBG_WAVEMSG_USER = 1,
HSA_DBG_WAVEMSG_ERROR = 2,
HSA_DBG_NUM_WAVEMSG,
HSA_DBG_MAX_WAVEMSG = 0xFFFFFFFF
};
enum HSA_DBG_WATCH_MODE {
HSA_DBG_WATCH_READ = 0, /* Read operations only */
HSA_DBG_WATCH_NONREAD = 1, /* Write or Atomic operations only */
HSA_DBG_WATCH_ATOMIC = 2, /* Atomic Operations only */
HSA_DBG_WATCH_ALL = 3, /* Read, Write or Atomic operations */
HSA_DBG_WATCH_NUM,
HSA_DBG_WATCH_SIZE = 0xFFFFFFFF
};
/* This structure is hardware specific and may change in the future */
struct HsaDbgWaveMsgAMDGen2 {
union {
struct ui32 {
uint32_t UserData:8; /* user data */
uint32_t ShaderArray:1; /* Shader array */
uint32_t Priv:1; /* Privileged */
uint32_t Reserved0:4; /* This field is reserved,
should be 0 */
uint32_t WaveId:4; /* wave id */
uint32_t SIMD:2; /* SIMD id */
uint32_t HSACU:4; /* Compute unit */
uint32_t ShaderEngine:2;/* Shader engine */
uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
uint32_t Reserved1:4; /* This field is reserved,
should be 0 */
} ui32;
uint32_t Value;
};
uint32_t Reserved2;
};
union HsaDbgWaveMessageAMD {
struct HsaDbgWaveMsgAMDGen2 WaveMsgInfoGen2;
/* for future HsaDbgWaveMsgAMDGen3; */
};
struct HsaDbgWaveMessage {
void *MemoryVA; /* ptr to associated host-accessible data */
union HsaDbgWaveMessageAMD DbgWaveMsg;
};
/*
 * TODO: These definitions are to be moved to kfd_event, once it is
 * implemented.
 *
 * HSA sync primitive, Event and HW Exception notification API definitions.
 * The API functions allow the runtime to define a so-called sync-primitive,
 * a SW object combining a user-mode provided "syncvar" and a scheduler event
 * that can be signaled through a defined GPU interrupt. A syncvar is
 * a process virtual memory location of a certain size that can be accessed
 * by CPU and GPU shader code within the process to set and query the content
 * within that memory. The definition of the content is determined by the HSA
 * runtime and potentially GPU shader code interfacing with the HSA runtime.
 * The syncvar values may be commonly written through a PM4 WRITE_DATA packet
 * in the user mode instruction stream. The OS scheduler event is typically
 * associated and signaled by an interrupt issued by the GPU, but other HSA
 * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
 * by the KFD by this mechanism, too.
 */
/* these are the new definitions for events */
enum HSA_EVENTTYPE {
HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */
HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */
HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change
(start/stop) */
HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */
HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state
(EOP pm4) */
/* ... */
HSA_EVENTTYPE_MAXID,
HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
};
/* Sub-definitions for various event types: Syncvar */
struct HsaSyncVar {
union SyncVar {
void *UserData; /* pointer to user mode data */
uint64_t UserDataPtrValue; /* 64bit compatibility of value */
} SyncVar;
uint64_t SyncVarSize;
};
/* Sub-definitions for various event types: NodeChange */
enum HSA_EVENTTYPE_NODECHANGE_FLAGS {
HSA_EVENTTYPE_NODECHANGE_ADD = 0,
HSA_EVENTTYPE_NODECHANGE_REMOVE = 1,
HSA_EVENTTYPE_NODECHANGE_SIZE = 0xFFFFFFFF
};
struct HsaNodeChange {
/* HSA node added/removed on the platform */
enum HSA_EVENTTYPE_NODECHANGE_FLAGS Flags;
};
/* Sub-definitions for various event types: DeviceStateChange */
enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS {
/* device started (and available) */
HSA_EVENTTYPE_DEVICESTATUSCHANGE_START = 0,
/* device stopped (i.e. unavailable) */
HSA_EVENTTYPE_DEVICESTATUSCHANGE_STOP = 1,
HSA_EVENTTYPE_DEVICESTATUSCHANGE_SIZE = 0xFFFFFFFF
};
enum HSA_DEVICE {
HSA_DEVICE_CPU = 0,
HSA_DEVICE_GPU = 1,
MAX_HSA_DEVICE = 2
};
struct HsaDeviceStateChange {
uint32_t NodeId; /* F-NUMA node that contains the device */
enum HSA_DEVICE Device; /* device type: GPU or CPU */
enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS Flags; /* event flags */
};
struct HsaEventData {
enum HSA_EVENTTYPE EventType; /* event type */
union EventData {
/*
* return data associated with HSA_EVENTTYPE_SIGNAL
* and other events
*/
struct HsaSyncVar SyncVar;
/* data associated with HSA_EVENTTYPE_NODE_CHANGE */
struct HsaNodeChange NodeChangeState;
/* data associated with HSA_EVENTTYPE_DEVICE_STATE_CHANGE */
struct HsaDeviceStateChange DeviceState;
} EventData;
/* the following data entries are internal to the KFD & thunk itself */
/* internal thunk store for Event data (OsEventHandle) */
uint64_t HWData1;
/* internal thunk store for Event data (HWAddress) */
uint64_t HWData2;
/* internal thunk store for Event data (HWData) */
uint32_t HWData3;
};
struct HsaEventDescriptor {
/* event type to allocate */
enum HSA_EVENTTYPE EventType;
/* H-NUMA node containing GPU device that is event source */
uint32_t NodeId;
/* pointer to user mode syncvar data, syncvar->UserDataPtrValue
* may be NULL
*/
struct HsaSyncVar SyncVar;
};
struct HsaEvent {
uint32_t EventId;
struct HsaEventData EventData;
};
#pragma pack(pop)
enum DBGDEV_TYPE {
DBGDEV_TYPE_ILLEGAL = 0,
DBGDEV_TYPE_NODIQ = 1,
DBGDEV_TYPE_DIQ = 2,
DBGDEV_TYPE_TEST = 3
};
struct dbg_address_watch_info {
struct kfd_process *process;
enum HSA_DBG_WATCH_MODE *watch_mode;
uint64_t *watch_address;
uint64_t *watch_mask;
struct HsaEvent *watch_event;
uint32_t num_watch_points;
};
struct dbg_wave_control_info {
struct kfd_process *process;
uint32_t trapId;
enum HSA_DBG_WAVEOP operand;
enum HSA_DBG_WAVEMODE mode;
struct HsaDbgWaveMessage dbgWave_msg;
};
struct kfd_dbgdev {
/* The device that owns this data. */
struct kfd_dev *dev;
/* kernel queue for DIQ */
struct kernel_queue *kq;
/* a pointer to the pqm of the calling process */
struct process_queue_manager *pqm;
/* type of debug device ( DIQ, non DIQ, etc. ) */
enum DBGDEV_TYPE type;
/* virtualized function pointers to device dbg */
int (*dbgdev_register)(struct kfd_dbgdev *dbgdev);
int (*dbgdev_unregister)(struct kfd_dbgdev *dbgdev);
};
struct kfd_dbgmgr {
unsigned int pasid;
struct kfd_dev *dev;
struct kfd_dbgdev *dbgdev;
};
/* prototypes for debug manager functions */
struct mutex *kfd_get_dbgmgr_mutex(void);
void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr);
bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev);
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
#endif /* KFD_DBGMGR_H_ */
drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -37,6 +37,7 @@ static const struct kfd_device_info kaveri_device_info = {
 	.max_no_of_hqd = 24,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
+	.num_of_watch_points = 4,
 	.mqd_size_aligned = MQD_SIZE_ALIGNED
 };
@@ -296,6 +297,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto dqm_start_error;
 	}

+	kfd->dbgmgr = NULL;
+
 	kfd->init_complete = true;
 	dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
 		 kfd->pdev->device);
drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_diq.h (new file)
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef KFD_PM4_HEADERS_DIQ_H_
#define KFD_PM4_HEADERS_DIQ_H_
/*--------------------_INDIRECT_BUFFER-------------------- */
#ifndef _PM4__INDIRECT_BUFFER_DEFINED
#define _PM4__INDIRECT_BUFFER_DEFINED
enum _INDIRECT_BUFFER_cache_policy_enum {
cache_policy___indirect_buffer__lru = 0,
cache_policy___indirect_buffer__stream = 1,
cache_policy___indirect_buffer__bypass = 2
};
enum {
IT_INDIRECT_BUFFER_PASID = 0x5C
};
struct pm4__indirect_buffer_pasid {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
unsigned int ordinal1;
};
union {
struct {
unsigned int reserved1:2;
unsigned int ib_base_lo:30;
} bitfields2;
unsigned int ordinal2;
};
union {
struct {
unsigned int ib_base_hi:16;
unsigned int reserved2:16;
} bitfields3;
unsigned int ordinal3;
};
union {
unsigned int control;
unsigned int ordinal4;
};
union {
struct {
unsigned int pasid:10;
unsigned int reserved4:22;
} bitfields5;
unsigned int ordinal5;
};
};
#endif
/*--------------------_RELEASE_MEM-------------------- */
#ifndef _PM4__RELEASE_MEM_DEFINED
#define _PM4__RELEASE_MEM_DEFINED
enum _RELEASE_MEM_event_index_enum {
event_index___release_mem__end_of_pipe = 5,
event_index___release_mem__shader_done = 6
};
enum _RELEASE_MEM_cache_policy_enum {
cache_policy___release_mem__lru = 0,
cache_policy___release_mem__stream = 1,
cache_policy___release_mem__bypass = 2
};
enum _RELEASE_MEM_dst_sel_enum {
dst_sel___release_mem__memory_controller = 0,
dst_sel___release_mem__tc_l2 = 1,
dst_sel___release_mem__queue_write_pointer_register = 2,
dst_sel___release_mem__queue_write_pointer_poll_mask_bit = 3
};
enum _RELEASE_MEM_int_sel_enum {
int_sel___release_mem__none = 0,
int_sel___release_mem__send_interrupt_only = 1,
int_sel___release_mem__send_interrupt_after_write_confirm = 2,
int_sel___release_mem__send_data_after_write_confirm = 3
};
enum _RELEASE_MEM_data_sel_enum {
data_sel___release_mem__none = 0,
data_sel___release_mem__send_32_bit_low = 1,
data_sel___release_mem__send_64_bit_data = 2,
data_sel___release_mem__send_gpu_clock_counter = 3,
data_sel___release_mem__send_cp_perfcounter_hi_lo = 4,
data_sel___release_mem__store_gds_data_to_memory = 5
};
struct pm4__release_mem {
union {
union PM4_MES_TYPE_3_HEADER header; /*header */
unsigned int ordinal1;
};
union {
struct {
unsigned int event_type:6;
unsigned int reserved1:2;
enum _RELEASE_MEM_event_index_enum event_index:4;
unsigned int tcl1_vol_action_ena:1;
unsigned int tc_vol_action_ena:1;
unsigned int reserved2:1;
unsigned int tc_wb_action_ena:1;
unsigned int tcl1_action_ena:1;
unsigned int tc_action_ena:1;
unsigned int reserved3:6;
unsigned int atc:1;
enum _RELEASE_MEM_cache_policy_enum cache_policy:2;
unsigned int reserved4:5;
} bitfields2;
unsigned int ordinal2;
};
union {
struct {
unsigned int reserved5:16;
enum _RELEASE_MEM_dst_sel_enum dst_sel:2;
unsigned int reserved6:6;
enum _RELEASE_MEM_int_sel_enum int_sel:3;
unsigned int reserved7:2;
enum _RELEASE_MEM_data_sel_enum data_sel:3;
} bitfields3;
unsigned int ordinal3;
};
union {
struct {
unsigned int reserved8:2;
unsigned int address_lo_32b:30;
} bitfields4;
struct {
unsigned int reserved9:3;
unsigned int address_lo_64b:29;
} bitfields5;
unsigned int ordinal4;
};
unsigned int address_hi;
unsigned int data_lo;
unsigned int data_hi;
};
#endif
/*--------------------_SET_CONFIG_REG-------------------- */
#ifndef _PM4__SET_CONFIG_REG_DEFINED
#define _PM4__SET_CONFIG_REG_DEFINED
struct pm4__set_config_reg {
union {
union PM4_MES_TYPE_3_HEADER header; /*header */
unsigned int ordinal1;
};
union {
struct {
unsigned int reg_offset:16;
unsigned int reserved1:7;
unsigned int vmid_shift:5;
unsigned int insert_vmid:1;
unsigned int reserved2:3;
} bitfields2;
unsigned int ordinal2;
};
unsigned int reg_data[1]; /*1..N of these fields */
};
#endif
/*--------------------_WAIT_REG_MEM-------------------- */
#ifndef _PM4__WAIT_REG_MEM_DEFINED
#define _PM4__WAIT_REG_MEM_DEFINED
enum _WAIT_REG_MEM_function_enum {
function___wait_reg_mem__always_pass = 0,
function___wait_reg_mem__less_than_ref_value = 1,
function___wait_reg_mem__less_than_equal_to_the_ref_value = 2,
function___wait_reg_mem__equal_to_the_reference_value = 3,
function___wait_reg_mem__not_equal_reference_value = 4,
function___wait_reg_mem__greater_than_or_equal_reference_value = 5,
function___wait_reg_mem__greater_than_reference_value = 6,
function___wait_reg_mem__reserved = 7
};
enum _WAIT_REG_MEM_mem_space_enum {
mem_space___wait_reg_mem__register_space = 0,
mem_space___wait_reg_mem__memory_space = 1
};
enum _WAIT_REG_MEM_operation_enum {
operation___wait_reg_mem__wait_reg_mem = 0,
operation___wait_reg_mem__wr_wait_wr_reg = 1
};
struct pm4__wait_reg_mem {
union {
union PM4_MES_TYPE_3_HEADER header; /*header */
unsigned int ordinal1;
};
union {
struct {
enum _WAIT_REG_MEM_function_enum function:3;
unsigned int reserved1:1;
enum _WAIT_REG_MEM_mem_space_enum mem_space:2;
enum _WAIT_REG_MEM_operation_enum operation:2;
unsigned int reserved2:24;
} bitfields2;
unsigned int ordinal2;
};
union {
struct {
unsigned int reserved3:2;
unsigned int memory_poll_addr_lo:30;
} bitfields3;
struct {
unsigned int register_poll_addr:16;
unsigned int reserved4:16;
} bitfields4;
struct {
unsigned int register_write_addr:16;
unsigned int reserved5:16;
} bitfields5;
unsigned int ordinal3;
};
union {
struct {
unsigned int poll_address_hi:16;
unsigned int reserved6:16;
} bitfields6;
struct {
unsigned int register_write_addr:16;
unsigned int reserved7:16;
} bitfields7;
unsigned int ordinal4;
};
unsigned int reference;
unsigned int mask;
union {
struct {
unsigned int poll_interval:16;
unsigned int reserved8:16;
} bitfields8;
unsigned int ordinal7;
};
};
#endif
#endif /* KFD_PM4_HEADERS_DIQ_H_ */
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -196,6 +196,9 @@ struct kfd_dev {
 	 * from the HW ring into a SW ring.
 	 */
 	bool interrupts_active;
+
+	/* Debug manager */
+	struct kfd_dbgmgr *dbgmgr;
 };

 /* KGD2KFD callbacks */
@@ -650,6 +653,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
 int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 			struct queue_properties *p);
+struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
+						unsigned int qid);

 /* Packet Manager */
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -357,7 +357,7 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 	return 0;
 }

-static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
+struct kernel_queue *pqm_get_kernel_queue(
 						struct process_queue_manager *pqm,
 						unsigned int qid)
 {