Commit a0aeb3b2 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-amdkfd-next-2017-08-18' of git://people.freedesktop.org/~gabbayo/linux into drm-next

This is the amdkfd pull request for 4.14 merge window.
AMD has started cleaning the pipe and sending patches from their internal
development to the upstream community.

The plan as I understand it is to first get all the non-dGPU patches to
upstream and then move to upstream dGPU support.

The patches here are relevant only for Kaveri and Carrizo.

The following is a summary of the changes:
- Add new IOCTL to set a Scratch memory VA
- Update PM4 headers for new firmware that supports scratch memory
- Support image tiling mode
- Remove all uses of BUG_ON
- Various bug fixes and coding style fixes

* tag 'drm-amdkfd-next-2017-08-18' of git://people.freedesktop.org/~gabbayo/linux: (24 commits)
  drm/amdkfd: Implement image tiling mode support v2
  drm/amdgpu: Add kgd kfd interface get_tile_config() v2
  drm/amdkfd: Adding new IOCTL for scratch memory v2
  drm/amdgpu: Add kgd/kfd interface to support scratch memory v2
  drm/amdgpu: Program SH_STATIC_MEM_CONFIG globally, not per-VMID
  drm/amd: Update MEC HQD loading code for KFD
  drm/amdgpu: Disable GFX PG on CZ
  drm/amdkfd: Update PM4 packet headers
  drm/amdkfd: Clamp EOP queue size correctly on Gfx8
  drm/amdkfd: Add more error printing to help bringup v2
  drm/amdkfd: Handle remaining BUG_ONs more gracefully v2
  drm/amdkfd: Allocate gtt_sa_bitmap in long units
  drm/amdkfd: Fix doorbell initialization and finalization
  drm/amdkfd: Remove BUG_ONs for NULL pointer arguments
  drm/amdkfd: Remove usage of alloc(sizeof(struct...
  drm/amdkfd: Fix goto usage v2
  drm/amdkfd: Change x==NULL/false references to !x
  drm/amdkfd: Consolidate and clean up log commands
  drm/amdkfd: Clean up KFD style errors and warnings v2
  drm/amdgpu: Remove hard-coded assumptions about compute pipes
  ...
parents 5fd27c2a 5d71dbc3
...@@ -28,14 +28,14 @@ ...@@ -28,14 +28,14 @@
#include <linux/module.h> #include <linux/module.h>
const struct kgd2kfd_calls *kgd2kfd; const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
int amdgpu_amdkfd_init(void) int amdgpu_amdkfd_init(void)
{ {
int ret; int ret;
#if defined(CONFIG_HSA_AMD_MODULE) #if defined(CONFIG_HSA_AMD_MODULE)
int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init); kgd2kfd_init_p = symbol_request(kgd2kfd_init);
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define AMDGPU_AMDKFD_H_INCLUDED #define AMDGPU_AMDKFD_H_INCLUDED
#include <linux/types.h> #include <linux/types.h>
#include <linux/mmu_context.h>
#include <kgd_kfd_interface.h> #include <kgd_kfd_interface.h>
struct amdgpu_device; struct amdgpu_device;
...@@ -60,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); ...@@ -60,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
/*
 * read_user_wptr() - safely read a queue write pointer from user space.
 *
 * @mmptr: mm_struct of the process that owns the user pointer
 * @wptr:  user-space pointer to the 32-bit write-pointer value
 * @dst:   lvalue that receives the value read
 *
 * Evaluates to true iff the read succeeded.  Both @mmptr and @wptr must
 * be non-NULL.  If @mmptr is the current task's mm, the value is read
 * directly with get_user().  Otherwise the read is attempted only when
 * current->mm is NULL (i.e. we are running in a kernel thread with no
 * user address space), in which case @mmptr is temporarily adopted via
 * use_mm()/unuse_mm() around the get_user() call.
 *
 * NOTE(review): if the caller runs in the context of a *different* user
 * process (current->mm != NULL and != @mmptr), the read is silently
 * skipped and the macro yields false — callers must tolerate that.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->mm == NULL) {		\
				use_mm(mmptr);				\
				valid = !get_user((dst), (wptr));	\
				unuse_mm(mmptr);			\
			}						\
		}							\
		valid;							\
	})
#endif /* AMDGPU_AMDKFD_H_INCLUDED */ #endif /* AMDGPU_AMDKFD_H_INCLUDED */
...@@ -39,6 +39,12 @@ ...@@ -39,6 +39,12 @@
#include "gmc/gmc_7_1_sh_mask.h" #include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h" #include "cik_structs.h"
/*
 * Request codes written to the CP_HQD_DEQUEUE_REQUEST register when
 * preempting a hardware queue descriptor (see kgd_hqd_destroy(), which
 * maps KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN/RESET onto these values).
 * The numeric values are defined by the CP microcode interface and
 * must not be changed.
 */
enum hqd_dequeue_request_type {
	NO_ACTION = 0,	/* do not dequeue */
	DRAIN_PIPE,	/* let in-flight wavefronts drain before dequeue */
	RESET_WAVES	/* forcibly reset active wavefronts */
};
enum { enum {
MAX_TRAPID = 8, /* 3 bits in the bitfield. */ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
MAX_WATCH_ADDRESSES = 4 MAX_WATCH_ADDRESSES = 4
...@@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, ...@@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr); uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr); uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id); uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id, unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id); uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
...@@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, ...@@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid); static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
/*
 * get_tile_config() - report the GPU's tiling configuration to the KFD.
 *
 * @kgd:    opaque device handle; actually a struct amdgpu_device *
 * @config: output structure filled with tiling parameters
 *
 * Copies the raw GB_ADDR_CONFIG value and the bank/rank counts decoded
 * from the memory-controller arbiter RAM config, and hands out pointers
 * into adev's tile-mode / macrotile-mode arrays.  No copy of the arrays
 * is made — the pointers reference storage inside @adev, so they remain
 * valid for the device's lifetime.
 *
 * Return: always 0.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}
static const struct kfd2kgd_calls kfd2kgd = { static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem, .init_gtt_mem_allocation = alloc_gtt_mem,
...@@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = { ...@@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request, .write_vmid_invalidate_request = write_vmid_invalidate_request,
.get_fw_version = get_fw_version .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
}; };
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
...@@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, ...@@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{ {
struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0); lock_srbm(kgd, mec, pipe, queue_id, 0);
...@@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) ...@@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
} }
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr) uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm)
{ {
struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m; struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
m = get_mqd(mqd); m = get_mqd(mqd);
is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
if (is_wptr_shadow_valid)
m->cp_hqd_pq_wptr = wptr_shadow;
acquire_queue(kgd, pipe_id, queue_id); acquire_queue(kgd, pipe_id, queue_id);
gfx_v7_0_mqd_commit(adev, m);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
/* Copy userspace write pointer value to register.
* Activate doorbell logic to monitor subsequent changes.
*/
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
if (read_user_wptr(mm, wptr, wptr_val))
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);
release_queue(kgd); release_queue(kgd);
return 0; return 0;
...@@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) ...@@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false; return false;
} }
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id, unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id) uint32_t queue_id)
{ {
struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp; uint32_t temp;
int timeout = utimeout; enum hqd_dequeue_request_type type;
unsigned long flags, end_jiffies;
int retry;
acquire_queue(kgd, pipe_id, queue_id); acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type); switch (reset_type) {
case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
type = DRAIN_PIPE;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
}
/* Workaround: If IQ timer is active and the wait time is close to or
* equal to 0, dequeueing is not safe. Wait until either the wait time
* is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
* cleared before continuing. Also, ensure wait times are set to at
* least 0x3.
*/
local_irq_save(flags);
preempt_disable();
retry = 5000; /* wait for 500 usecs at maximum */
while (true) {
temp = RREG32(mmCP_HQD_IQ_TIMER);
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
pr_debug("HW is processing IQ\n");
goto loop;
}
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
== 3) /* SEM-rearm is safe */
break;
/* Wait time 3 is safe for CP, but our MMIO read/write
* time is close to 1 microsecond, so check for 10 to
* leave more buffer room
*/
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
>= 10)
break;
pr_debug("IQ timer is active\n");
} else
break;
loop:
if (!retry) {
pr_err("CP HQD IQ timer status time out\n");
break;
}
ndelay(100);
--retry;
}
retry = 1000;
while (true) {
temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
break;
pr_debug("Dequeue request is pending\n");
if (!retry) {
pr_err("CP HQD dequeue request time out\n");
break;
}
ndelay(100);
--retry;
}
local_irq_restore(flags);
preempt_enable();
WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) { while (true) {
temp = RREG32(mmCP_HQD_ACTIVE); temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break; break;
if (timeout <= 0) { if (time_after(jiffies, end_jiffies)) {
pr_err("kfd: cp queue preemption time out.\n"); pr_err("cp queue preemption time out\n");
release_queue(kgd); release_queue(kgd);
return -ETIME; return -ETIME;
} }
msleep(20); usleep_range(500, 1000);
timeout -= 20;
} }
release_queue(kgd); release_queue(kgd);
...@@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) ...@@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
} }
/*
 * set_scratch_backing_va() - program the scratch-memory backing VA
 * for one VMID.
 *
 * @kgd:  opaque device handle; actually a struct amdgpu_device *
 * @va:   virtual address backing scratch memory
 * @vmid: VMID whose banked SH_HIDDEN_PRIVATE_BASE_VMID register is set
 *
 * Selects the target VMID's SRBM register bank under the SRBM lock,
 * writes the VA, then restores the default bank.
 *
 * NOTE(review): @va is 64-bit but WREG32 writes a 32-bit register, so
 * only the low 32 bits reach the hardware — presumably the caller
 * passes a suitably shifted/aligned value; confirm against callers.
 */
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *) kgd; struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
...@@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) ...@@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) { switch (type) {
case KGD_ENGINE_PFP: case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->gfx.pfp_fw->data; adev->gfx.pfp_fw->data;
break; break;
case KGD_ENGINE_ME: case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->gfx.me_fw->data; adev->gfx.me_fw->data;
break; break;
case KGD_ENGINE_CE: case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->gfx.ce_fw->data; adev->gfx.ce_fw->data;
break; break;
case KGD_ENGINE_MEC1: case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->gfx.mec_fw->data; adev->gfx.mec_fw->data;
break; break;
case KGD_ENGINE_MEC2: case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->gfx.mec2_fw->data; adev->gfx.mec2_fw->data;
break; break;
case KGD_ENGINE_RLC: case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->gfx.rlc_fw->data; adev->gfx.rlc_fw->data;
break; break;
case KGD_ENGINE_SDMA1: case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->sdma.instance[0].fw->data; adev->sdma.instance[0].fw->data;
break; break;
case KGD_ENGINE_SDMA2: case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *) hdr = (const union amdgpu_firmware_header *)
adev->sdma.instance[1].fw->data; adev->sdma.instance[1].fw->data;
break; break;
default: default:
......
...@@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) ...@@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1); ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3); INDEX_STRIDE, 3);
WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
...@@ -1934,7 +1935,6 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) ...@@ -1934,7 +1935,6 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1); WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0); WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_base); WREG32(mmSH_MEM_BASES, sh_mem_base);
WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
} }
cik_srbm_select(adev, 0, 0, 0, 0); cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
......
...@@ -3707,6 +3707,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) ...@@ -3707,6 +3707,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1); ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3); INDEX_STRIDE, 3);
WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i); vi_srbm_select(adev, 0, 0, 0, i);
...@@ -3730,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) ...@@ -3730,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1); WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0); WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
} }
vi_srbm_select(adev, 0, 0, 0, 0); vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
......
...@@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle) ...@@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */ /* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0; adev->pg_flags = 0;
if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) { if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE | AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD | AMD_PG_SUPPORT_UVD |
......
This diff is collapsed.
This diff is collapsed.
...@@ -44,8 +44,6 @@ struct mutex *kfd_get_dbgmgr_mutex(void) ...@@ -44,8 +44,6 @@ struct mutex *kfd_get_dbgmgr_mutex(void)
static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr) static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
{ {
BUG_ON(!pmgr);
kfree(pmgr->dbgdev); kfree(pmgr->dbgdev);
pmgr->dbgdev = NULL; pmgr->dbgdev = NULL;
...@@ -55,7 +53,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr) ...@@ -55,7 +53,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr) void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
{ {
if (pmgr != NULL) { if (pmgr) {
kfd_dbgmgr_uninitialize(pmgr); kfd_dbgmgr_uninitialize(pmgr);
kfree(pmgr); kfree(pmgr);
} }
...@@ -66,12 +64,12 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) ...@@ -66,12 +64,12 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ; enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
struct kfd_dbgmgr *new_buff; struct kfd_dbgmgr *new_buff;
BUG_ON(pdev == NULL); if (WARN_ON(!pdev->init_complete))
BUG_ON(!pdev->init_complete); return false;
new_buff = kfd_alloc_struct(new_buff); new_buff = kfd_alloc_struct(new_buff);
if (!new_buff) { if (!new_buff) {
pr_err("amdkfd: Failed to allocate dbgmgr instance\n"); pr_err("Failed to allocate dbgmgr instance\n");
return false; return false;
} }
...@@ -79,7 +77,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) ...@@ -79,7 +77,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
new_buff->dev = pdev; new_buff->dev = pdev;
new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev); new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
if (!new_buff->dbgdev) { if (!new_buff->dbgdev) {
pr_err("amdkfd: Failed to allocate dbgdev instance\n"); pr_err("Failed to allocate dbgdev instance\n");
kfree(new_buff); kfree(new_buff);
return false; return false;
} }
...@@ -96,8 +94,6 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) ...@@ -96,8 +94,6 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p) long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{ {
BUG_ON(!p || !pmgr || !pmgr->dbgdev);
if (pmgr->pasid != 0) { if (pmgr->pasid != 0) {
pr_debug("H/W debugger is already active using pasid %d\n", pr_debug("H/W debugger is already active using pasid %d\n",
pmgr->pasid); pmgr->pasid);
...@@ -118,8 +114,6 @@ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p) ...@@ -118,8 +114,6 @@ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p) long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{ {
BUG_ON(!p || !pmgr || !pmgr->dbgdev);
/* Is the requests coming from the already registered process? */ /* Is the requests coming from the already registered process? */
if (pmgr->pasid != p->pasid) { if (pmgr->pasid != p->pasid) {
pr_debug("H/W debugger is not registered by calling pasid %d\n", pr_debug("H/W debugger is not registered by calling pasid %d\n",
...@@ -137,8 +131,6 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p) ...@@ -137,8 +131,6 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
struct dbg_wave_control_info *wac_info) struct dbg_wave_control_info *wac_info)
{ {
BUG_ON(!pmgr || !pmgr->dbgdev || !wac_info);
/* Is the requests coming from the already registered process? */ /* Is the requests coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) { if (pmgr->pasid != wac_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n", pr_debug("H/W debugger support was not registered for requester pasid %d\n",
...@@ -152,9 +144,6 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, ...@@ -152,9 +144,6 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr, long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
struct dbg_address_watch_info *adw_info) struct dbg_address_watch_info *adw_info)
{ {
BUG_ON(!pmgr || !pmgr->dbgdev || !adw_info);
/* Is the requests coming from the already registered process? */ /* Is the requests coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) { if (pmgr->pasid != adw_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n", pr_debug("H/W debugger support was not registered for requester pasid %d\n",
......
...@@ -30,13 +30,11 @@ ...@@ -30,13 +30,11 @@
#pragma pack(push, 4) #pragma pack(push, 4)
enum HSA_DBG_WAVEOP { enum HSA_DBG_WAVEOP {
HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */ HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */ HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */ HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter dbg mode */
debug mode */ HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take a trap */
HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take
a trap */
HSA_DBG_NUM_WAVEOP = 5, HSA_DBG_NUM_WAVEOP = 5,
HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
}; };
...@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 { ...@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 {
uint32_t UserData:8; /* user data */ uint32_t UserData:8; /* user data */
uint32_t ShaderArray:1; /* Shader array */ uint32_t ShaderArray:1; /* Shader array */
uint32_t Priv:1; /* Privileged */ uint32_t Priv:1; /* Privileged */
uint32_t Reserved0:4; /* This field is reserved, uint32_t Reserved0:4; /* Reserved, should be 0 */
should be 0 */
uint32_t WaveId:4; /* wave id */ uint32_t WaveId:4; /* wave id */
uint32_t SIMD:2; /* SIMD id */ uint32_t SIMD:2; /* SIMD id */
uint32_t HSACU:4; /* Compute unit */ uint32_t HSACU:4; /* Compute unit */
uint32_t ShaderEngine:2;/* Shader engine */ uint32_t ShaderEngine:2;/* Shader engine */
uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */ uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
uint32_t Reserved1:4; /* This field is reserved, uint32_t Reserved1:4; /* Reserved, should be 0 */
should be 0 */
} ui32; } ui32;
uint32_t Value; uint32_t Value;
}; };
...@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage { ...@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage {
* in the user mode instruction stream. The OS scheduler event is typically * in the user mode instruction stream. The OS scheduler event is typically
* associated and signaled by an interrupt issued by the GPU, but other HSA * associated and signaled by an interrupt issued by the GPU, but other HSA
* system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
* by the KFD by this mechanism, too. */ * by the KFD by this mechanism, too.
*/
/* these are the new definitions for events */ /* these are the new definitions for events */
enum HSA_EVENTTYPE { enum HSA_EVENTTYPE {
HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */ HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */
HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */ HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */
HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change
(start/stop) */ * (start/stop)
*/
HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */ HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */ HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */ HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */
HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */ HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state
(EOP pm4) */ * (EOP pm4)
*/
/* ... */ /* ... */
HSA_EVENTTYPE_MAXID, HSA_EVENTTYPE_MAXID,
HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
......
This diff is collapsed.
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "kfd_device_queue_manager.h" #include "kfd_device_queue_manager.h"
#include "cik_regs.h" #include "cik_regs.h"
#include "oss/oss_2_4_sh_mask.h" #include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd, struct qcm_process_device *qpd,
...@@ -65,7 +66,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) ...@@ -65,7 +66,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM. * for LDS/Scratch and GPUVM.
*/ */
BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0); top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) | return PRIVATE_BASE(top_address_nybble << 12) |
...@@ -104,8 +105,6 @@ static int register_process_cik(struct device_queue_manager *dqm, ...@@ -104,8 +105,6 @@ static int register_process_cik(struct device_queue_manager *dqm,
struct kfd_process_device *pdd; struct kfd_process_device *pdd;
unsigned int temp; unsigned int temp;
BUG_ON(!dqm || !qpd);
pdd = qpd_to_pdd(qpd); pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */ /* check if sh_mem_config register already configured */
...@@ -125,9 +124,10 @@ static int register_process_cik(struct device_queue_manager *dqm, ...@@ -125,9 +124,10 @@ static int register_process_cik(struct device_queue_manager *dqm,
} else { } else {
temp = get_sh_mem_bases_nybble_64(pdd); temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
} }
pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0; return 0;
......
...@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) ...@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM. * for LDS/Scratch and GPUVM.
*/ */
BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0); top_address_nybble == 0);
return top_address_nybble << 12 | return top_address_nybble << 12 |
...@@ -110,8 +110,6 @@ static int register_process_vi(struct device_queue_manager *dqm, ...@@ -110,8 +110,6 @@ static int register_process_vi(struct device_queue_manager *dqm,
struct kfd_process_device *pdd; struct kfd_process_device *pdd;
unsigned int temp; unsigned int temp;
BUG_ON(!dqm || !qpd);
pdd = qpd_to_pdd(qpd); pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */ /* check if sh_mem_config register already configured */
...@@ -137,9 +135,11 @@ static int register_process_vi(struct device_queue_manager *dqm, ...@@ -137,9 +135,11 @@ static int register_process_vi(struct device_queue_manager *dqm,
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 << qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
SH_MEM_CONFIG__ADDRESS_MODE__SHIFT; SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
qpd->sh_mem_config |= 1 <<
SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
} }
pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0; return 0;
......
...@@ -59,7 +59,7 @@ static inline size_t doorbell_process_allocation(void) ...@@ -59,7 +59,7 @@ static inline size_t doorbell_process_allocation(void)
} }
/* Doorbell calculations for device init. */ /* Doorbell calculations for device init. */
void kfd_doorbell_init(struct kfd_dev *kfd) int kfd_doorbell_init(struct kfd_dev *kfd)
{ {
size_t doorbell_start_offset; size_t doorbell_start_offset;
size_t doorbell_aperture_size; size_t doorbell_aperture_size;
...@@ -95,26 +95,35 @@ void kfd_doorbell_init(struct kfd_dev *kfd) ...@@ -95,26 +95,35 @@ void kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation()); doorbell_process_allocation());
BUG_ON(!kfd->doorbell_kernel_ptr); if (!kfd->doorbell_kernel_ptr)
return -ENOMEM;
pr_debug("kfd: doorbell initialization:\n"); pr_debug("Doorbell initialization:\n");
pr_debug("kfd: doorbell base == 0x%08lX\n", pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base); (uintptr_t)kfd->doorbell_base);
pr_debug("kfd: doorbell_id_offset == 0x%08lX\n", pr_debug("doorbell_id_offset == 0x%08lX\n",
kfd->doorbell_id_offset); kfd->doorbell_id_offset);
pr_debug("kfd: doorbell_process_limit == 0x%08lX\n", pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit); doorbell_process_limit);
pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n", pr_debug("doorbell_kernel_offset == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base); (uintptr_t)kfd->doorbell_base);
pr_debug("kfd: doorbell aperture size == 0x%08lX\n", pr_debug("doorbell aperture size == 0x%08lX\n",
kfd->shared_resources.doorbell_aperture_size); kfd->shared_resources.doorbell_aperture_size);
pr_debug("kfd: doorbell kernel address == 0x%08lX\n", pr_debug("doorbell kernel address == 0x%08lX\n",
(uintptr_t)kfd->doorbell_kernel_ptr); (uintptr_t)kfd->doorbell_kernel_ptr);
return 0;
}
void kfd_doorbell_fini(struct kfd_dev *kfd)
{
if (kfd->doorbell_kernel_ptr)
iounmap(kfd->doorbell_kernel_ptr);
} }
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
...@@ -131,7 +140,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) ...@@ -131,7 +140,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
/* Find kfd device according to gpu id */ /* Find kfd device according to gpu id */
dev = kfd_device_by_id(vma->vm_pgoff); dev = kfd_device_by_id(vma->vm_pgoff);
if (dev == NULL) if (!dev)
return -EINVAL; return -EINVAL;
/* Calculate physical address of doorbell */ /* Calculate physical address of doorbell */
...@@ -142,12 +151,11 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) ...@@ -142,12 +151,11 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pr_debug("kfd: mapping doorbell page in %s\n" pr_debug("Mapping doorbell page\n"
" target user address == 0x%08llX\n" " target user address == 0x%08llX\n"
" physical address == 0x%08llX\n" " physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n" " vm_flags == 0x%04lX\n"
" size == 0x%04lX\n", " size == 0x%04lX\n",
__func__,
(unsigned long long) vma->vm_start, address, vma->vm_flags, (unsigned long long) vma->vm_start, address, vma->vm_flags,
doorbell_process_allocation()); doorbell_process_allocation());
...@@ -166,8 +174,6 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, ...@@ -166,8 +174,6 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
{ {
u32 inx; u32 inx;
BUG_ON(!kfd || !doorbell_off);
mutex_lock(&kfd->doorbell_mutex); mutex_lock(&kfd->doorbell_mutex);
inx = find_first_zero_bit(kfd->doorbell_available_index, inx = find_first_zero_bit(kfd->doorbell_available_index,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
...@@ -185,7 +191,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, ...@@ -185,7 +191,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() / *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
sizeof(u32)) + inx; sizeof(u32)) + inx;
pr_debug("kfd: get kernel queue doorbell\n" pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n" " doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n", " kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
...@@ -197,8 +203,6 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) ...@@ -197,8 +203,6 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{ {
unsigned int inx; unsigned int inx;
BUG_ON(!kfd || !db_addr);
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr); inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
mutex_lock(&kfd->doorbell_mutex); mutex_lock(&kfd->doorbell_mutex);
...@@ -210,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value) ...@@ -210,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
{ {
if (db) { if (db) {
writel(value, db); writel(value, db);
pr_debug("writing %d to doorbell address 0x%p\n", value, db); pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
} }
} }
......
...@@ -110,7 +110,7 @@ static bool allocate_free_slot(struct kfd_process *process, ...@@ -110,7 +110,7 @@ static bool allocate_free_slot(struct kfd_process *process,
*out_page = page; *out_page = page;
*out_slot_index = slot; *out_slot_index = slot;
pr_debug("allocated event signal slot in page %p, slot %d\n", pr_debug("Allocated event signal slot in page %p, slot %d\n",
page, slot); page, slot);
return true; return true;
...@@ -155,9 +155,9 @@ static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p) ...@@ -155,9 +155,9 @@ static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
struct signal_page, struct signal_page,
event_pages)->page_index + 1; event_pages)->page_index + 1;
pr_debug("allocated new event signal page at %p, for process %p\n", pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p); page, p);
pr_debug("page index is %d\n", page->page_index); pr_debug("Page index is %d\n", page->page_index);
list_add(&page->event_pages, &p->signal_event_pages); list_add(&page->event_pages, &p->signal_event_pages);
...@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page, ...@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page,
page->free_slots++; page->free_slots++;
/* We don't free signal pages, they are retained by the process /* We don't free signal pages, they are retained by the process
* and reused until it exits. */ * and reused until it exits.
*/
} }
static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p, static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
...@@ -246,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p) ...@@ -246,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = p->next_nonsignal_event_id; for (id = p->next_nonsignal_event_id;
id < KFD_LAST_NONSIGNAL_EVENT_ID && id < KFD_LAST_NONSIGNAL_EVENT_ID &&
lookup_event_by_id(p, id) != NULL; lookup_event_by_id(p, id);
id++) id++)
; ;
...@@ -265,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p) ...@@ -265,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = KFD_FIRST_NONSIGNAL_EVENT_ID; for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
id < KFD_LAST_NONSIGNAL_EVENT_ID && id < KFD_LAST_NONSIGNAL_EVENT_ID &&
lookup_event_by_id(p, id) != NULL; lookup_event_by_id(p, id);
id++) id++)
; ;
...@@ -291,13 +292,13 @@ static int create_signal_event(struct file *devkfd, ...@@ -291,13 +292,13 @@ static int create_signal_event(struct file *devkfd,
struct kfd_event *ev) struct kfd_event *ev)
{ {
if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
pr_warn("amdkfd: Signal event wasn't created because limit was reached\n"); pr_warn("Signal event wasn't created because limit was reached\n");
return -ENOMEM; return -ENOMEM;
} }
if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page, if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
&ev->signal_slot_index)) { &ev->signal_slot_index)) {
pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n"); pr_warn("Signal event wasn't created because out of kernel memory\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -309,11 +310,7 @@ static int create_signal_event(struct file *devkfd, ...@@ -309,11 +310,7 @@ static int create_signal_event(struct file *devkfd,
ev->event_id = make_signal_event_id(ev->signal_page, ev->event_id = make_signal_event_id(ev->signal_page,
ev->signal_slot_index); ev->signal_slot_index);
pr_debug("signal event number %zu created with id %d, address %p\n", pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
pr_debug("signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id, p->signal_event_count, ev->event_id,
ev->user_signal_address); ev->user_signal_address);
...@@ -345,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p) ...@@ -345,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p)
static void destroy_event(struct kfd_process *p, struct kfd_event *ev) static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{ {
if (ev->signal_page != NULL) { if (ev->signal_page) {
release_event_notification_slot(ev->signal_page, release_event_notification_slot(ev->signal_page,
ev->signal_slot_index); ev->signal_slot_index);
p->signal_event_count--; p->signal_event_count--;
...@@ -584,7 +581,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id, ...@@ -584,7 +581,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
* search faster. * search faster.
*/ */
struct signal_page *page; struct signal_page *page;
unsigned i; unsigned int i;
list_for_each_entry(page, &p->signal_event_pages, event_pages) list_for_each_entry(page, &p->signal_event_pages, event_pages)
for (i = 0; i < SLOTS_PER_PAGE; i++) for (i = 0; i < SLOTS_PER_PAGE; i++)
...@@ -816,7 +813,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma) ...@@ -816,7 +813,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
/* check required size is logical */ /* check required size is logical */
if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) != if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
get_order(vma->vm_end - vma->vm_start)) { get_order(vma->vm_end - vma->vm_start)) {
pr_err("amdkfd: event page mmap requested illegal size\n"); pr_err("Event page mmap requested illegal size\n");
return -EINVAL; return -EINVAL;
} }
...@@ -825,7 +822,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma) ...@@ -825,7 +822,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
page = lookup_signal_page_by_index(p, page_index); page = lookup_signal_page_by_index(p, page_index);
if (!page) { if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */ /* Probably KFD bug, but mmap is user-accessible. */
pr_debug("signal page could not be found for page_index %u\n", pr_debug("Signal page could not be found for page_index %u\n",
page_index); page_index);
return -EINVAL; return -EINVAL;
} }
...@@ -836,7 +833,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma) ...@@ -836,7 +833,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP; | VM_DONTDUMP | VM_PFNMAP;
pr_debug("mapping signal page\n"); pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start); pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
pr_debug(" end user address == 0x%08lx\n", vma->vm_end); pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
pr_debug(" pfn == 0x%016lX\n", pfn); pr_debug(" pfn == 0x%016lX\n", pfn);
......
...@@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process) ...@@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process)
id < NUM_OF_SUPPORTED_GPUS) { id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_create_process_device_data(dev, process); pdd = kfd_create_process_device_data(dev, process);
if (pdd == NULL) { if (!pdd) {
pr_err("Failed to create process device data\n"); pr_err("Failed to create process device data\n");
return -1; return -1;
} }
......
...@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work) ...@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work)
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry) bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
{ {
/* integer and bitwise OR so there is no boolean short-circuiting */ /* integer and bitwise OR so there is no boolean short-circuiting */
unsigned wanted = 0; unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev, wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
ih_ring_entry); ih_ring_entry);
......
...@@ -41,11 +41,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -41,11 +41,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
int retval; int retval;
union PM4_MES_TYPE_3_HEADER nop; union PM4_MES_TYPE_3_HEADER nop;
BUG_ON(!kq || !dev); if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); return false;
pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
__func__, KFD_QUEUE_TYPE_HIQ, queue_size); queue_size);
memset(&prop, 0, sizeof(prop)); memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop)); memset(&nop, 0, sizeof(nop));
...@@ -63,23 +63,23 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -63,23 +63,23 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
KFD_MQD_TYPE_HIQ); KFD_MQD_TYPE_HIQ);
break; break;
default: default:
BUG(); pr_err("Invalid queue type %d\n", type);
break; return false;
} }
if (kq->mqd == NULL) if (!kq->mqd)
return false; return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
if (prop.doorbell_ptr == NULL) { if (!prop.doorbell_ptr) {
pr_err("amdkfd: error init doorbell"); pr_err("Failed to initialize doorbell");
goto err_get_kernel_doorbell; goto err_get_kernel_doorbell;
} }
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0) { if (retval != 0) {
pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); pr_err("Failed to init pq queues size %d\n", queue_size);
goto err_pq_allocate_vidmem; goto err_pq_allocate_vidmem;
} }
...@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr; kq->pq_gpu_addr = kq->pq->gpu_addr;
retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size); retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
if (retval == false) if (!retval)
goto err_eop_allocate_vidmem; goto err_eop_allocate_vidmem;
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel), retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
...@@ -139,11 +139,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -139,11 +139,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
/* assign HIQ to HQD */ /* assign HIQ to HQD */
if (type == KFD_QUEUE_TYPE_HIQ) { if (type == KFD_QUEUE_TYPE_HIQ) {
pr_debug("assigning hiq to hqd\n"); pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE; kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE; kq->queue->queue = KFD_CIK_HIQ_QUEUE;
kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe, kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
kq->queue->queue, NULL); kq->queue->queue, &kq->queue->properties,
NULL);
} else { } else {
/* allocate fence for DIQ */ /* allocate fence for DIQ */
...@@ -180,8 +181,6 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -180,8 +181,6 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
static void uninitialize(struct kernel_queue *kq) static void uninitialize(struct kernel_queue *kq)
{ {
BUG_ON(!kq);
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd, kq->mqd->destroy_mqd(kq->mqd,
NULL, NULL,
...@@ -211,8 +210,6 @@ static int acquire_packet_buffer(struct kernel_queue *kq, ...@@ -211,8 +210,6 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
uint32_t wptr, rptr; uint32_t wptr, rptr;
unsigned int *queue_address; unsigned int *queue_address;
BUG_ON(!kq || !buffer_ptr);
rptr = *kq->rptr_kernel; rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel; wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr; queue_address = (unsigned int *)kq->pq_kernel_addr;
...@@ -252,11 +249,7 @@ static void submit_packet(struct kernel_queue *kq) ...@@ -252,11 +249,7 @@ static void submit_packet(struct kernel_queue *kq)
{ {
#ifdef DEBUG #ifdef DEBUG
int i; int i;
#endif
BUG_ON(!kq);
#ifdef DEBUG
for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) { for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
pr_debug("0x%2X ", kq->pq_kernel_addr[i]); pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
if (i % 15 == 0) if (i % 15 == 0)
...@@ -272,7 +265,6 @@ static void submit_packet(struct kernel_queue *kq) ...@@ -272,7 +265,6 @@ static void submit_packet(struct kernel_queue *kq)
static void rollback_packet(struct kernel_queue *kq) static void rollback_packet(struct kernel_queue *kq)
{ {
BUG_ON(!kq);
kq->pending_wptr = *kq->queue->properties.write_ptr; kq->pending_wptr = *kq->queue->properties.write_ptr;
} }
...@@ -281,9 +273,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, ...@@ -281,9 +273,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
{ {
struct kernel_queue *kq; struct kernel_queue *kq;
BUG_ON(!dev); kq = kzalloc(sizeof(*kq), GFP_KERNEL);
kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
if (!kq) if (!kq)
return NULL; return NULL;
...@@ -304,7 +294,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, ...@@ -304,7 +294,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
} }
if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) { if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
pr_err("amdkfd: failed to init kernel queue\n"); pr_err("Failed to init kernel queue\n");
kfree(kq); kfree(kq);
return NULL; return NULL;
} }
...@@ -313,32 +303,37 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, ...@@ -313,32 +303,37 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
void kernel_queue_uninit(struct kernel_queue *kq) void kernel_queue_uninit(struct kernel_queue *kq)
{ {
BUG_ON(!kq);
kq->ops.uninitialize(kq); kq->ops.uninitialize(kq);
kfree(kq); kfree(kq);
} }
/* FIXME: Can this test be removed? */
static __attribute__((unused)) void test_kq(struct kfd_dev *dev) static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
{ {
struct kernel_queue *kq; struct kernel_queue *kq;
uint32_t *buffer, i; uint32_t *buffer, i;
int retval; int retval;
BUG_ON(!dev); pr_err("Starting kernel queue test\n");
pr_err("amdkfd: starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
BUG_ON(!kq); if (unlikely(!kq)) {
pr_err(" Failed to initialize HIQ\n");
pr_err("Kernel queue test failed\n");
return;
}
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
BUG_ON(retval != 0); if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n");
return;
}
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet; buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq); kq->ops.submit_packet(kq);
pr_err("amdkfd: ending kernel queue test\n"); pr_err("Ending kernel queue test\n");
} }
...@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm, ...@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm,
static int amdkfd_init_completed; static int amdkfd_init_completed;
int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) int kgd2kfd_init(unsigned int interface_version,
const struct kgd2kfd_calls **g2f)
{ {
if (!amdkfd_init_completed) if (!amdkfd_init_completed)
return -EPROBE_DEFER; return -EPROBE_DEFER;
...@@ -90,7 +91,7 @@ static int __init kfd_module_init(void) ...@@ -90,7 +91,7 @@ static int __init kfd_module_init(void)
/* Verify module parameters */ /* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) || if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) { (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
pr_err("kfd: sched_policy has invalid value\n"); pr_err("sched_policy has invalid value\n");
return -1; return -1;
} }
...@@ -98,13 +99,13 @@ static int __init kfd_module_init(void) ...@@ -98,13 +99,13 @@ static int __init kfd_module_init(void)
if ((max_num_of_queues_per_device < 1) || if ((max_num_of_queues_per_device < 1) ||
(max_num_of_queues_per_device > (max_num_of_queues_per_device >
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1; return -1;
} }
err = kfd_pasid_init(); err = kfd_pasid_init();
if (err < 0) if (err < 0)
goto err_pasid; return err;
err = kfd_chardev_init(); err = kfd_chardev_init();
if (err < 0) if (err < 0)
...@@ -126,7 +127,6 @@ static int __init kfd_module_init(void) ...@@ -126,7 +127,6 @@ static int __init kfd_module_init(void)
kfd_chardev_exit(); kfd_chardev_exit();
err_ioctl: err_ioctl:
kfd_pasid_exit(); kfd_pasid_exit();
err_pasid:
return err; return err;
} }
......
...@@ -67,7 +67,8 @@ struct mqd_manager { ...@@ -67,7 +67,8 @@ struct mqd_manager {
int (*load_mqd)(struct mqd_manager *mm, void *mqd, int (*load_mqd)(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id, uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr); struct queue_properties *p,
struct mm_struct *mms);
int (*update_mqd)(struct mqd_manager *mm, void *mqd, int (*update_mqd)(struct mqd_manager *mm, void *mqd,
struct queue_properties *q); struct queue_properties *q);
......
...@@ -44,10 +44,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ...@@ -44,10 +44,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m; struct cik_mqd *m;
int retval; int retval;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj); mqd_mem_obj);
...@@ -101,7 +97,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ...@@ -101,7 +97,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = AQL_ENABLE; m->cp_hqd_iq_rptr = AQL_ENABLE;
*mqd = m; *mqd = m;
if (gart_addr != NULL) if (gart_addr)
*gart_addr = addr; *gart_addr = addr;
retval = mm->update_mqd(mm, m, q); retval = mm->update_mqd(mm, m, q);
...@@ -115,8 +111,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, ...@@ -115,8 +111,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
int retval; int retval;
struct cik_sdma_rlc_registers *m; struct cik_sdma_rlc_registers *m;
BUG_ON(!mm || !mqd || !mqd_mem_obj);
retval = kfd_gtt_sa_allocate(mm->dev, retval = kfd_gtt_sa_allocate(mm->dev,
sizeof(struct cik_sdma_rlc_registers), sizeof(struct cik_sdma_rlc_registers),
mqd_mem_obj); mqd_mem_obj);
...@@ -129,7 +123,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, ...@@ -129,7 +123,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
memset(m, 0, sizeof(struct cik_sdma_rlc_registers)); memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
*mqd = m; *mqd = m;
if (gart_addr != NULL) if (gart_addr)
*gart_addr = (*mqd_mem_obj)->gpu_addr; *gart_addr = (*mqd_mem_obj)->gpu_addr;
retval = mm->update_mqd(mm, m, q); retval = mm->update_mqd(mm, m, q);
...@@ -140,27 +134,31 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, ...@@ -140,27 +134,31 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
static void uninit_mqd(struct mqd_manager *mm, void *mqd, static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj) struct kfd_mem_obj *mqd_mem_obj)
{ {
BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj); kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
} }
static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj) struct kfd_mem_obj *mqd_mem_obj)
{ {
BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj); kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
} }
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr) uint32_t queue_id, struct queue_properties *p,
struct mm_struct *mms)
{ {
return mm->dev->kfd2kgd->hqd_load /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
(mm->dev->kgd, mqd, pipe_id, queue_id, wptr); uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms);
} }
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id, uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr) struct queue_properties *p, struct mm_struct *mms)
{ {
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd); return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
} }
...@@ -170,10 +168,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -170,10 +168,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
{ {
struct cik_mqd *m; struct cik_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd); m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN; DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
...@@ -188,21 +182,17 @@ static int update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -188,21 +182,17 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_EN | m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid; m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) { if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR; m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
}
m->cp_hqd_active = 0;
q->is_active = false; q->is_active = false;
if (q->queue_size > 0 && if (q->queue_size > 0 &&
q->queue_address != 0 && q->queue_address != 0 &&
q->queue_percent > 0) { q->queue_percent > 0) {
m->cp_hqd_active = 1;
q->is_active = true; q->is_active = true;
} }
...@@ -214,8 +204,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, ...@@ -214,8 +204,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
{ {
struct cik_sdma_rlc_registers *m; struct cik_sdma_rlc_registers *m;
BUG_ON(!mm || !mqd || !q);
m = get_sdma_mqd(mqd); m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
...@@ -254,7 +242,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, ...@@ -254,7 +242,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id, unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id) uint32_t queue_id)
{ {
return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout, return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id); pipe_id, queue_id);
} }
...@@ -301,10 +289,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, ...@@ -301,10 +289,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m; struct cik_mqd *m;
int retval; int retval;
BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
pr_debug("kfd: In func %s\n", __func__);
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj); mqd_mem_obj);
...@@ -359,10 +343,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, ...@@ -359,10 +343,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
{ {
struct cik_mqd *m; struct cik_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd); m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | DEFAULT_MIN_AVAIL_SIZE |
...@@ -400,8 +380,6 @@ struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) ...@@ -400,8 +380,6 @@ struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{ {
struct cik_sdma_rlc_registers *m; struct cik_sdma_rlc_registers *m;
BUG_ON(!mqd);
m = (struct cik_sdma_rlc_registers *)mqd; m = (struct cik_sdma_rlc_registers *)mqd;
return m; return m;
...@@ -412,12 +390,10 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, ...@@ -412,12 +390,10 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
{ {
struct mqd_manager *mqd; struct mqd_manager *mqd;
BUG_ON(!dev); if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
BUG_ON(type >= KFD_MQD_TYPE_MAX); return NULL;
pr_debug("kfd: In func %s\n", __func__);
mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL); mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd) if (!mqd)
return NULL; return NULL;
......
...@@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ...@@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = 1; m->cp_hqd_iq_rptr = 1;
*mqd = m; *mqd = m;
if (gart_addr != NULL) if (gart_addr)
*gart_addr = addr; *gart_addr = addr;
retval = mm->update_mqd(mm, m, q); retval = mm->update_mqd(mm, m, q);
...@@ -94,10 +94,15 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ...@@ -94,10 +94,15 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
static int load_mqd(struct mqd_manager *mm, void *mqd, static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id, uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr) struct queue_properties *p, struct mm_struct *mms)
{ {
return mm->dev->kfd2kgd->hqd_load /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
(mm->dev->kgd, mqd, pipe_id, queue_id, wptr); uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms);
} }
static int __update_mqd(struct mqd_manager *mm, void *mqd, static int __update_mqd(struct mqd_manager *mm, void *mqd,
...@@ -106,10 +111,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -106,10 +111,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
{ {
struct vi_mqd *m; struct vi_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd); m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT | m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
...@@ -117,7 +118,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -117,7 +118,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT; mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
m->cp_hqd_pq_control |= m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
...@@ -126,10 +127,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -126,10 +127,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = m->cp_hqd_pq_doorbell_control =
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
q->doorbell_off << q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n", pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control); m->cp_hqd_pq_doorbell_control);
m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT | m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
...@@ -139,8 +139,15 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -139,8 +139,15 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT | 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT; mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
m->cp_hqd_eop_control |= /*
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1; * HW does not clamp this field correctly. Maximum EOP queue size
* is constrained by per-SE EOP done signal count, which is 8-bit.
* Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
* more than (EOP entry count - 1) so a queue size of 0x800 dwords
* is safe, giving a maximum field value of 0xA.
*/
m->cp_hqd_eop_control |= min(0xA,
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
m->cp_hqd_eop_base_addr_lo = m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8); lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi = m->cp_hqd_eop_base_addr_hi =
...@@ -156,12 +163,10 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, ...@@ -156,12 +163,10 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT; 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
} }
m->cp_hqd_active = 0;
q->is_active = false; q->is_active = false;
if (q->queue_size > 0 && if (q->queue_size > 0 &&
q->queue_address != 0 && q->queue_address != 0 &&
q->queue_percent > 0) { q->queue_percent > 0) {
m->cp_hqd_active = 1;
q->is_active = true; q->is_active = true;
} }
...@@ -181,14 +186,13 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, ...@@ -181,14 +186,13 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id) uint32_t queue_id)
{ {
return mm->dev->kfd2kgd->hqd_destroy return mm->dev->kfd2kgd->hqd_destroy
(mm->dev->kgd, type, timeout, (mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id); pipe_id, queue_id);
} }
static void uninit_mqd(struct mqd_manager *mm, void *mqd, static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj) struct kfd_mem_obj *mqd_mem_obj)
{ {
BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj); kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
} }
...@@ -238,12 +242,10 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, ...@@ -238,12 +242,10 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
{ {
struct mqd_manager *mqd; struct mqd_manager *mqd;
BUG_ON(!dev); if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
BUG_ON(type >= KFD_MQD_TYPE_MAX); return NULL;
pr_debug("kfd: In func %s\n", __func__);
mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL); mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd) if (!mqd)
return NULL; return NULL;
......
...@@ -32,7 +32,8 @@ int kfd_pasid_init(void) ...@@ -32,7 +32,8 @@ int kfd_pasid_init(void)
{ {
pasid_limit = KFD_MAX_NUM_OF_PROCESSES; pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
GFP_KERNEL);
if (!pasid_bitmap) if (!pasid_bitmap)
return -ENOMEM; return -ENOMEM;
...@@ -91,6 +92,6 @@ unsigned int kfd_pasid_alloc(void) ...@@ -91,6 +92,6 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid) void kfd_pasid_free(unsigned int pasid)
{ {
BUG_ON(pasid == 0 || pasid >= pasid_limit); if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
clear_bit(pasid, pasid_bitmap); clear_bit(pasid, pasid_bitmap);
} }
...@@ -28,112 +28,19 @@ ...@@ -28,112 +28,19 @@
#define PM4_MES_HEADER_DEFINED #define PM4_MES_HEADER_DEFINED
union PM4_MES_TYPE_3_HEADER { union PM4_MES_TYPE_3_HEADER {
struct { struct {
uint32_t reserved1:8; /* < reserved */ /* reserved */
uint32_t opcode:8; /* < IT opcode */ uint32_t reserved1:8;
uint32_t count:14; /* < number of DWORDs - 1 /* IT opcode */
* in the information body. uint32_t opcode:8;
*/ /* number of DWORDs - 1 in the information body */
uint32_t type:2; /* < packet identifier. uint32_t count:14;
* It should be 3 for type 3 packets /* packet identifier. It should be 3 for type 3 packets */
*/ uint32_t type:2;
}; };
uint32_t u32all; uint32_t u32all;
}; };
#endif /* PM4_MES_HEADER_DEFINED */ #endif /* PM4_MES_HEADER_DEFINED */
/* --------------------MES_SET_RESOURCES-------------------- */
#ifndef PM4_MES_SET_RESOURCES_DEFINED
#define PM4_MES_SET_RESOURCES_DEFINED
enum set_resources_queue_type_enum {
queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
queue_type__mes_set_resources__hsa_debug_interface_queue = 4
};
struct pm4_set_resources {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
uint32_t reserved1:5;
enum set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
};
uint32_t queue_mask_lo;
uint32_t queue_mask_hi;
uint32_t gws_mask_lo;
uint32_t gws_mask_hi;
union {
struct {
uint32_t oac_mask:16;
uint32_t reserved2:16;
} bitfields7;
uint32_t ordinal7;
};
union {
struct {
uint32_t gds_heap_base:6;
uint32_t reserved3:5;
uint32_t gds_heap_size:6;
uint32_t reserved4:15;
} bitfields8;
uint32_t ordinal8;
};
};
#endif
/*--------------------MES_RUN_LIST-------------------- */
#ifndef PM4_MES_RUN_LIST_DEFINED
#define PM4_MES_RUN_LIST_DEFINED
struct pm4_runlist {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
uint32_t reserved1:2;
uint32_t ib_base_lo:30;
} bitfields2;
uint32_t ordinal2;
};
union {
struct {
uint32_t ib_base_hi:16;
uint32_t reserved2:16;
} bitfields3;
uint32_t ordinal3;
};
union {
struct {
uint32_t ib_size:20;
uint32_t chain:1;
uint32_t offload_polling:1;
uint32_t reserved3:1;
uint32_t valid:1;
uint32_t reserved4:8;
} bitfields4;
uint32_t ordinal4;
};
};
#endif
/*--------------------MES_MAP_PROCESS-------------------- */ /*--------------------MES_MAP_PROCESS-------------------- */
...@@ -186,217 +93,58 @@ struct pm4_map_process { ...@@ -186,217 +93,58 @@ struct pm4_map_process {
}; };
#endif #endif
/*--------------------MES_MAP_QUEUES--------------------*/ #ifndef PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
#define PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
#ifndef PM4_MES_MAP_QUEUES_DEFINED
#define PM4_MES_MAP_QUEUES_DEFINED
enum map_queues_queue_sel_enum {
queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
queue_sel__mes_map_queues__enable_process_queues = 2
};
enum map_queues_vidmem_enum { struct pm4_map_process_scratch_kv {
vidmem__mes_map_queues__uses_no_video_memory = 0,
vidmem__mes_map_queues__uses_video_memory = 1
};
enum map_queues_alloc_format_enum {
alloc_format__mes_map_queues__one_per_pipe = 0,
alloc_format__mes_map_queues__all_on_one_pipe = 1
};
enum map_queues_engine_sel_enum {
engine_sel__mes_map_queues__compute = 0,
engine_sel__mes_map_queues__sdma0 = 2,
engine_sel__mes_map_queues__sdma1 = 3
};
struct pm4_map_queues {
union { union {
union PM4_MES_TYPE_3_HEADER header; /* header */ union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1; uint32_t ordinal1;
};
union {
struct {
uint32_t reserved1:4;
enum map_queues_queue_sel_enum queue_sel:2;
uint32_t reserved2:2;
uint32_t vmid:4;
uint32_t reserved3:4;
enum map_queues_vidmem_enum vidmem:2;
uint32_t reserved4:6;
enum map_queues_alloc_format_enum alloc_format:2;
enum map_queues_engine_sel_enum engine_sel:3;
uint32_t num_queues:3;
} bitfields2;
uint32_t ordinal2;
};
struct {
union {
struct {
uint32_t is_static:1;
uint32_t reserved5:1;
uint32_t doorbell_offset:21;
uint32_t reserved6:3;
uint32_t queue:6;
} bitfields3;
uint32_t ordinal3;
};
uint32_t mqd_addr_lo;
uint32_t mqd_addr_hi;
uint32_t wptr_addr_lo;
uint32_t wptr_addr_hi;
} mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
};
#endif
/*--------------------MES_QUERY_STATUS--------------------*/
#ifndef PM4_MES_QUERY_STATUS_DEFINED
#define PM4_MES_QUERY_STATUS_DEFINED
enum query_status_interrupt_sel_enum {
interrupt_sel__mes_query_status__completion_status = 0,
interrupt_sel__mes_query_status__process_status = 1,
interrupt_sel__mes_query_status__queue_status = 2
};
enum query_status_command_enum {
command__mes_query_status__interrupt_only = 0,
command__mes_query_status__fence_only_immediate = 1,
command__mes_query_status__fence_only_after_write_ack = 2,
command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
};
enum query_status_engine_sel_enum {
engine_sel__mes_query_status__compute = 0,
engine_sel__mes_query_status__sdma0_queue = 2,
engine_sel__mes_query_status__sdma1_queue = 3
};
struct pm4_query_status {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
uint32_t context_id:28;
enum query_status_interrupt_sel_enum interrupt_sel:2;
enum query_status_command_enum command:2;
} bitfields2;
uint32_t ordinal2;
}; };
union { union {
struct { struct {
uint32_t pasid:16; uint32_t pasid:16;
uint32_t reserved1:16; uint32_t reserved1:8;
} bitfields3a; uint32_t diq_enable:1;
struct { uint32_t process_quantum:7;
uint32_t reserved2:2;
uint32_t doorbell_offset:21;
uint32_t reserved3:3;
enum query_status_engine_sel_enum engine_sel:3;
uint32_t reserved4:3;
} bitfields3b;
uint32_t ordinal3;
};
uint32_t addr_lo;
uint32_t addr_hi;
uint32_t data_lo;
uint32_t data_hi;
};
#endif
/*--------------------MES_UNMAP_QUEUES--------------------*/
#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
#define PM4_MES_UNMAP_QUEUES_DEFINED
enum unmap_queues_action_enum {
action__mes_unmap_queues__preempt_queues = 0,
action__mes_unmap_queues__reset_queues = 1,
action__mes_unmap_queues__disable_process_queues = 2
};
enum unmap_queues_queue_sel_enum {
queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2,
queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only = 3
};
enum unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__compute = 0,
engine_sel__mes_unmap_queues__sdma0 = 2,
engine_sel__mes_unmap_queues__sdma1 = 3
};
struct pm4_unmap_queues {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
enum unmap_queues_action_enum action:2;
uint32_t reserved1:2;
enum unmap_queues_queue_sel_enum queue_sel:2;
uint32_t reserved2:20;
enum unmap_queues_engine_sel_enum engine_sel:3;
uint32_t num_queues:3;
} bitfields2; } bitfields2;
uint32_t ordinal2; uint32_t ordinal2;
}; };
union { union {
struct { struct {
uint32_t pasid:16; uint32_t page_table_base:28;
uint32_t reserved3:16; uint32_t reserved2:4;
} bitfields3a; } bitfields3;
struct {
uint32_t reserved4:2;
uint32_t doorbell_offset0:21;
uint32_t reserved5:9;
} bitfields3b;
uint32_t ordinal3; uint32_t ordinal3;
}; };
union { uint32_t reserved3;
struct { uint32_t sh_mem_bases;
uint32_t reserved6:2; uint32_t sh_mem_config;
uint32_t doorbell_offset1:21; uint32_t sh_mem_ape1_base;
uint32_t reserved7:9; uint32_t sh_mem_ape1_limit;
} bitfields4; uint32_t sh_hidden_private_base_vmid;
uint32_t ordinal4; uint32_t reserved4;
}; uint32_t reserved5;
uint32_t gds_addr_lo;
union { uint32_t gds_addr_hi;
struct {
uint32_t reserved8:2;
uint32_t doorbell_offset2:21;
uint32_t reserved9:9;
} bitfields5;
uint32_t ordinal5;
};
union { union {
struct { struct {
uint32_t reserved10:2; uint32_t num_gws:6;
uint32_t doorbell_offset3:21; uint32_t reserved6:2;
uint32_t reserved11:9; uint32_t num_oac:4;
} bitfields6; uint32_t reserved7:4;
uint32_t ordinal6; uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields14;
uint32_t ordinal14;
}; };
uint32_t completion_signal_lo32;
uint32_t completion_signal_hi32;
}; };
#endif #endif
......
...@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER { ...@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER {
struct { struct {
uint32_t reserved1 : 8; /* < reserved */ uint32_t reserved1 : 8; /* < reserved */
uint32_t opcode : 8; /* < IT opcode */ uint32_t opcode : 8; /* < IT opcode */
uint32_t count : 14;/* < number of DWORDs - 1 in the uint32_t count : 14;/* < Number of DWORDS - 1 in the
information body. */ * information body
uint32_t type : 2; /* < packet identifier. */
It should be 3 for type 3 packets */ uint32_t type : 2; /* < packet identifier
* It should be 3 for type 3 packets
*/
}; };
uint32_t u32All; uint32_t u32All;
}; };
...@@ -124,9 +126,10 @@ struct pm4_mes_runlist { ...@@ -124,9 +126,10 @@ struct pm4_mes_runlist {
uint32_t ib_size:20; uint32_t ib_size:20;
uint32_t chain:1; uint32_t chain:1;
uint32_t offload_polling:1; uint32_t offload_polling:1;
uint32_t reserved3:1; uint32_t reserved2:1;
uint32_t valid:1; uint32_t valid:1;
uint32_t reserved4:8; uint32_t process_cnt:4;
uint32_t reserved3:4;
} bitfields4; } bitfields4;
uint32_t ordinal4; uint32_t ordinal4;
}; };
...@@ -141,8 +144,8 @@ struct pm4_mes_runlist { ...@@ -141,8 +144,8 @@ struct pm4_mes_runlist {
struct pm4_mes_map_process { struct pm4_mes_map_process {
union { union {
union PM4_MES_TYPE_3_HEADER header; /* header */ union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1; uint32_t ordinal1;
}; };
union { union {
...@@ -153,36 +156,48 @@ struct pm4_mes_map_process { ...@@ -153,36 +156,48 @@ struct pm4_mes_map_process {
uint32_t process_quantum:7; uint32_t process_quantum:7;
} bitfields2; } bitfields2;
uint32_t ordinal2; uint32_t ordinal2;
}; };
union { union {
struct { struct {
uint32_t page_table_base:28; uint32_t page_table_base:28;
uint32_t reserved2:4; uint32_t reserved3:4;
} bitfields3; } bitfields3;
uint32_t ordinal3; uint32_t ordinal3;
}; };
uint32_t reserved;
uint32_t sh_mem_bases; uint32_t sh_mem_bases;
uint32_t sh_mem_config;
uint32_t sh_mem_ape1_base; uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit; uint32_t sh_mem_ape1_limit;
uint32_t sh_mem_config;
uint32_t sh_hidden_private_base_vmid;
uint32_t reserved2;
uint32_t reserved3;
uint32_t gds_addr_lo; uint32_t gds_addr_lo;
uint32_t gds_addr_hi; uint32_t gds_addr_hi;
union { union {
struct { struct {
uint32_t num_gws:6; uint32_t num_gws:6;
uint32_t reserved3:2; uint32_t reserved4:2;
uint32_t num_oac:4; uint32_t num_oac:4;
uint32_t reserved4:4; uint32_t reserved5:4;
uint32_t gds_size:6; uint32_t gds_size:6;
uint32_t num_queues:10; uint32_t num_queues:10;
} bitfields10; } bitfields10;
uint32_t ordinal10; uint32_t ordinal10;
}; };
uint32_t completion_signal_lo;
uint32_t completion_signal_hi;
}; };
#endif #endif
/*--------------------MES_MAP_QUEUES--------------------*/ /*--------------------MES_MAP_QUEUES--------------------*/
...@@ -335,7 +350,7 @@ enum mes_unmap_queues_engine_sel_enum { ...@@ -335,7 +350,7 @@ enum mes_unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__sdmal = 3 engine_sel__mes_unmap_queues__sdmal = 3
}; };
struct PM4_MES_UNMAP_QUEUES { struct pm4_mes_unmap_queues {
union { union {
union PM4_MES_TYPE_3_HEADER header; /* header */ union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1; uint32_t ordinal1;
...@@ -395,4 +410,101 @@ struct PM4_MES_UNMAP_QUEUES { ...@@ -395,4 +410,101 @@ struct PM4_MES_UNMAP_QUEUES {
}; };
#endif #endif
#ifndef PM4_MEC_RELEASE_MEM_DEFINED
#define PM4_MEC_RELEASE_MEM_DEFINED
enum RELEASE_MEM_event_index_enum {
event_index___release_mem__end_of_pipe = 5,
event_index___release_mem__shader_done = 6
};
enum RELEASE_MEM_cache_policy_enum {
cache_policy___release_mem__lru = 0,
cache_policy___release_mem__stream = 1,
cache_policy___release_mem__bypass = 2
};
enum RELEASE_MEM_dst_sel_enum {
dst_sel___release_mem__memory_controller = 0,
dst_sel___release_mem__tc_l2 = 1,
dst_sel___release_mem__queue_write_pointer_register = 2,
dst_sel___release_mem__queue_write_pointer_poll_mask_bit = 3
};
enum RELEASE_MEM_int_sel_enum {
int_sel___release_mem__none = 0,
int_sel___release_mem__send_interrupt_only = 1,
int_sel___release_mem__send_interrupt_after_write_confirm = 2,
int_sel___release_mem__send_data_after_write_confirm = 3
};
enum RELEASE_MEM_data_sel_enum {
data_sel___release_mem__none = 0,
data_sel___release_mem__send_32_bit_low = 1,
data_sel___release_mem__send_64_bit_data = 2,
data_sel___release_mem__send_gpu_clock_counter = 3,
data_sel___release_mem__send_cp_perfcounter_hi_lo = 4,
data_sel___release_mem__store_gds_data_to_memory = 5
};
struct pm4_mec_release_mem {
union {
union PM4_MES_TYPE_3_HEADER header; /*header */
unsigned int ordinal1;
};
union {
struct {
unsigned int event_type:6;
unsigned int reserved1:2;
enum RELEASE_MEM_event_index_enum event_index:4;
unsigned int tcl1_vol_action_ena:1;
unsigned int tc_vol_action_ena:1;
unsigned int reserved2:1;
unsigned int tc_wb_action_ena:1;
unsigned int tcl1_action_ena:1;
unsigned int tc_action_ena:1;
unsigned int reserved3:6;
unsigned int atc:1;
enum RELEASE_MEM_cache_policy_enum cache_policy:2;
unsigned int reserved4:5;
} bitfields2;
unsigned int ordinal2;
};
union {
struct {
unsigned int reserved5:16;
enum RELEASE_MEM_dst_sel_enum dst_sel:2;
unsigned int reserved6:6;
enum RELEASE_MEM_int_sel_enum int_sel:3;
unsigned int reserved7:2;
enum RELEASE_MEM_data_sel_enum data_sel:3;
} bitfields3;
unsigned int ordinal3;
};
union {
struct {
unsigned int reserved8:2;
unsigned int address_lo_32b:30;
} bitfields4;
struct {
unsigned int reserved9:3;
unsigned int address_lo_64b:29;
} bitfields5;
unsigned int ordinal4;
};
unsigned int address_hi;
unsigned int data_lo;
unsigned int data_hi;
};
#endif
enum {
CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
};
#endif #endif
...@@ -239,11 +239,6 @@ enum kfd_preempt_type_filter { ...@@ -239,11 +239,6 @@ enum kfd_preempt_type_filter {
KFD_PREEMPT_TYPE_FILTER_BY_PASID KFD_PREEMPT_TYPE_FILTER_BY_PASID
}; };
enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET
};
/** /**
* enum kfd_queue_type * enum kfd_queue_type
* *
...@@ -294,13 +289,13 @@ enum kfd_queue_format { ...@@ -294,13 +289,13 @@ enum kfd_queue_format {
* @write_ptr: Defines the number of dwords written to the ring buffer. * @write_ptr: Defines the number of dwords written to the ring buffer.
* *
* @doorbell_ptr: This field aim is to notify the H/W of new packet written to * @doorbell_ptr: This field aim is to notify the H/W of new packet written to
* the queue ring buffer. This field should be similar to write_ptr and the user * the queue ring buffer. This field should be similar to write_ptr and the
* should update this field after he updated the write_ptr. * user should update this field after he updated the write_ptr.
* *
* @doorbell_off: The doorbell offset in the doorbell pci-bar. * @doorbell_off: The doorbell offset in the doorbell pci-bar.
* *
* @is_interop: Defines if this is a interop queue. Interop queue means that the * @is_interop: Defines if this is a interop queue. Interop queue means that
* queue can access both graphics and compute resources. * the queue can access both graphics and compute resources.
* *
* @is_active: Defines if the queue is active or not. * @is_active: Defines if the queue is active or not.
* *
...@@ -352,9 +347,10 @@ struct queue_properties { ...@@ -352,9 +347,10 @@ struct queue_properties {
* @properties: The queue properties. * @properties: The queue properties.
* *
* @mec: Used only in no cp scheduling mode and identifies to micro engine id * @mec: Used only in no cp scheduling mode and identifies to micro engine id
* that the queue should be execute on. * that the queue should be execute on.
* *
* @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id. * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
* id.
* *
* @queue: Used only in no cp scheduliong mode and identifies the queue's slot. * @queue: Used only in no cp scheduliong mode and identifies the queue's slot.
* *
...@@ -436,6 +432,7 @@ struct qcm_process_device { ...@@ -436,6 +432,7 @@ struct qcm_process_device {
uint32_t gds_size; uint32_t gds_size;
uint32_t num_gws; uint32_t num_gws;
uint32_t num_oac; uint32_t num_oac;
uint32_t sh_hidden_private_base;
}; };
/* Data that is per-process-per device. */ /* Data that is per-process-per device. */
...@@ -520,8 +517,8 @@ struct kfd_process { ...@@ -520,8 +517,8 @@ struct kfd_process {
struct mutex event_mutex; struct mutex event_mutex;
/* All events in process hashed by ID, linked on kfd_event.events. */ /* All events in process hashed by ID, linked on kfd_event.events. */
DECLARE_HASHTABLE(events, 4); DECLARE_HASHTABLE(events, 4);
struct list_head signal_event_pages; /* struct slot_page_header. /* struct slot_page_header.event_pages */
event_pages */ struct list_head signal_event_pages;
u32 next_nonsignal_event_id; u32 next_nonsignal_event_id;
size_t signal_event_count; size_t signal_event_count;
}; };
...@@ -559,8 +556,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, ...@@ -559,8 +556,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p); struct kfd_process *p);
/* Process device data iterator */ /* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p); struct kfd_process_device *kfd_get_first_process_device_data(
struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p, struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
struct kfd_process *p,
struct kfd_process_device *pdd); struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p); bool kfd_has_process_device_data(struct kfd_process *p);
...@@ -573,7 +572,8 @@ unsigned int kfd_pasid_alloc(void); ...@@ -573,7 +572,8 @@ unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid); void kfd_pasid_free(unsigned int pasid);
/* Doorbells */ /* Doorbells */
void kfd_doorbell_init(struct kfd_dev *kfd); int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma); int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off); unsigned int *doorbell_off);
......
...@@ -79,9 +79,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread) ...@@ -79,9 +79,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
{ {
struct kfd_process *process; struct kfd_process *process;
BUG_ON(!kfd_process_wq); if (!thread->mm)
if (thread->mm == NULL)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */ /* Only the pthreads threading model is supported. */
...@@ -101,7 +99,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread) ...@@ -101,7 +99,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
/* A prior open of /dev/kfd could have already created the process. */ /* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread); process = find_process(thread);
if (process) if (process)
pr_debug("kfd: process already found\n"); pr_debug("Process already found\n");
if (!process) if (!process)
process = create_process(thread); process = create_process(thread);
...@@ -117,7 +115,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread) ...@@ -117,7 +115,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
{ {
struct kfd_process *process; struct kfd_process *process;
if (thread->mm == NULL) if (!thread->mm)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */ /* Only the pthreads threading model is supported. */
...@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu) ...@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process_release_work *work; struct kfd_process_release_work *work;
struct kfd_process *p; struct kfd_process *p;
BUG_ON(!kfd_process_wq);
p = container_of(rcu, struct kfd_process, rcu); p = container_of(rcu, struct kfd_process, rcu);
BUG_ON(atomic_read(&p->mm->mm_count) <= 0); WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm); mmdrop(p->mm);
...@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, ...@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
* mmu_notifier srcu is read locked * mmu_notifier srcu is read locked
*/ */
p = container_of(mn, struct kfd_process, mmu_notifier); p = container_of(mn, struct kfd_process, mmu_notifier);
BUG_ON(p->mm != mm); if (WARN_ON(p->mm != mm))
return;
mutex_lock(&kfd_processes_mutex); mutex_lock(&kfd_processes_mutex);
hash_del_rcu(&p->kfd_processes); hash_del_rcu(&p->kfd_processes);
...@@ -250,7 +247,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, ...@@ -250,7 +247,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
kfd_dbgmgr_destroy(pdd->dev->dbgmgr); kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
if (pdd->reset_wavefronts) { if (pdd->reset_wavefronts) {
pr_warn("amdkfd: Resetting all wave fronts\n"); pr_warn("Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p); dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false; pdd->reset_wavefronts = false;
} }
...@@ -407,8 +404,6 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) ...@@ -407,8 +404,6 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
struct kfd_process *p; struct kfd_process *p;
struct kfd_process_device *pdd; struct kfd_process_device *pdd;
BUG_ON(dev == NULL);
/* /*
* Look for the process that matches the pasid. If there is no such * Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there * process, we either released it in amdkfd's own notifier, or there
...@@ -449,14 +444,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) ...@@ -449,14 +444,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
mutex_unlock(&p->mutex); mutex_unlock(&p->mutex);
} }
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) struct kfd_process_device *kfd_get_first_process_device_data(
struct kfd_process *p)
{ {
return list_first_entry(&p->per_device_data, return list_first_entry(&p->per_device_data,
struct kfd_process_device, struct kfd_process_device,
per_device_list); per_device_list);
} }
struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p, struct kfd_process_device *kfd_get_next_process_device_data(
struct kfd_process *p,
struct kfd_process_device *pdd) struct kfd_process_device *pdd)
{ {
if (list_is_last(&pdd->per_device_list, &p->per_device_data)) if (list_is_last(&pdd->per_device_list, &p->per_device_data))
......
...@@ -32,12 +32,9 @@ static inline struct process_queue_node *get_queue_by_qid( ...@@ -32,12 +32,9 @@ static inline struct process_queue_node *get_queue_by_qid(
{ {
struct process_queue_node *pqn; struct process_queue_node *pqn;
BUG_ON(!pqm);
list_for_each_entry(pqn, &pqm->queues, process_queue_list) { list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (pqn->q && pqn->q->properties.queue_id == qid) if ((pqn->q && pqn->q->properties.queue_id == qid) ||
return pqn; (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
return pqn; return pqn;
} }
...@@ -49,17 +46,13 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, ...@@ -49,17 +46,13 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
{ {
unsigned long found; unsigned long found;
BUG_ON(!pqm || !qid);
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap, found = find_first_zero_bit(pqm->queue_slot_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found); pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n", pr_info("Cannot open more queues for process with pasid %d\n",
pqm->process->pasid); pqm->process->pasid);
return -ENOMEM; return -ENOMEM;
} }
...@@ -72,13 +65,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, ...@@ -72,13 +65,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{ {
BUG_ON(!pqm);
INIT_LIST_HEAD(&pqm->queues); INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap = pqm->queue_slot_bitmap =
kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL); BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL) if (!pqm->queue_slot_bitmap)
return -ENOMEM; return -ENOMEM;
pqm->process = p; pqm->process = p;
...@@ -90,10 +81,6 @@ void pqm_uninit(struct process_queue_manager *pqm) ...@@ -90,10 +81,6 @@ void pqm_uninit(struct process_queue_manager *pqm)
int retval; int retval;
struct process_queue_node *pqn, *next; struct process_queue_node *pqn, *next;
BUG_ON(!pqm);
pr_debug("In func %s\n", __func__);
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
retval = pqm_destroy_queue( retval = pqm_destroy_queue(
pqm, pqm,
...@@ -102,7 +89,7 @@ void pqm_uninit(struct process_queue_manager *pqm) ...@@ -102,7 +89,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqn->kq->queue->properties.queue_id); pqn->kq->queue->properties.queue_id);
if (retval != 0) { if (retval != 0) {
pr_err("kfd: failed to destroy queue\n"); pr_err("failed to destroy queue\n");
return; return;
} }
} }
...@@ -117,8 +104,6 @@ static int create_cp_queue(struct process_queue_manager *pqm, ...@@ -117,8 +104,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
{ {
int retval; int retval;
retval = 0;
/* Doorbell initialized in user space*/ /* Doorbell initialized in user space*/
q_properties->doorbell_ptr = NULL; q_properties->doorbell_ptr = NULL;
...@@ -131,16 +116,13 @@ static int create_cp_queue(struct process_queue_manager *pqm, ...@@ -131,16 +116,13 @@ static int create_cp_queue(struct process_queue_manager *pqm,
retval = init_queue(q, q_properties); retval = init_queue(q, q_properties);
if (retval != 0) if (retval != 0)
goto err_init_queue; return retval;
(*q)->device = dev; (*q)->device = dev;
(*q)->process = pqm->process; (*q)->process = pqm->process;
pr_debug("kfd: PQM After init queue"); pr_debug("PQM After init queue");
return retval;
err_init_queue:
return retval; return retval;
} }
...@@ -161,8 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -161,8 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int num_queues = 0; int num_queues = 0;
struct queue *cur; struct queue *cur;
BUG_ON(!pqm || !dev || !properties || !qid);
memset(&q_properties, 0, sizeof(struct queue_properties)); memset(&q_properties, 0, sizeof(struct queue_properties));
memcpy(&q_properties, properties, sizeof(struct queue_properties)); memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL; q = NULL;
...@@ -185,7 +165,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -185,7 +165,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
list_for_each_entry(cur, &pdd->qpd.queues_list, list) list_for_each_entry(cur, &pdd->qpd.queues_list, list)
num_queues++; num_queues++;
if (num_queues >= dev->device_info->max_no_of_hqd/2) if (num_queues >= dev->device_info->max_no_of_hqd/2)
return (-ENOSPC); return -ENOSPC;
} }
retval = find_available_queue_slot(pqm, qid); retval = find_available_queue_slot(pqm, qid);
...@@ -197,7 +177,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -197,7 +177,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
} }
pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL); pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) { if (!pqn) {
retval = -ENOMEM; retval = -ENOMEM;
goto err_allocate_pqn; goto err_allocate_pqn;
...@@ -210,7 +190,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -210,7 +190,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= VMID_PER_DEVICE) || ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) { (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n"); pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM; retval = -EPERM;
goto err_create_queue; goto err_create_queue;
} }
...@@ -227,7 +207,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -227,7 +207,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
break; break;
case KFD_QUEUE_TYPE_DIQ: case KFD_QUEUE_TYPE_DIQ:
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
if (kq == NULL) { if (!kq) {
retval = -ENOMEM; retval = -ENOMEM;
goto err_create_queue; goto err_create_queue;
} }
...@@ -238,22 +218,22 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -238,22 +218,22 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq, &pdd->qpd); kq, &pdd->qpd);
break; break;
default: default:
BUG(); WARN(1, "Invalid queue type %d", type);
break; retval = -EINVAL;
} }
if (retval != 0) { if (retval != 0) {
pr_debug("Error dqm create queue\n"); pr_err("DQM create queue failed\n");
goto err_create_queue; goto err_create_queue;
} }
pr_debug("kfd: PQM After DQM create queue\n"); pr_debug("PQM After DQM create queue\n");
list_add(&pqn->process_queue_list, &pqm->queues); list_add(&pqn->process_queue_list, &pqm->queues);
if (q) { if (q) {
*properties = q->properties; *properties = q->properties;
pr_debug("kfd: PQM done creating queue\n"); pr_debug("PQM done creating queue\n");
print_queue_properties(properties); print_queue_properties(properties);
} }
...@@ -279,14 +259,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) ...@@ -279,14 +259,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = NULL; dqm = NULL;
BUG_ON(!pqm);
retval = 0; retval = 0;
pr_debug("kfd: In Func %s\n", __func__);
pqn = get_queue_by_qid(pqm, qid); pqn = get_queue_by_qid(pqm, qid);
if (pqn == NULL) { if (!pqn) {
pr_err("kfd: queue id does not match any known queue\n"); pr_err("Queue id does not match any known queue\n");
return -EINVAL; return -EINVAL;
} }
...@@ -295,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) ...@@ -295,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->kq->dev; dev = pqn->kq->dev;
if (pqn->q) if (pqn->q)
dev = pqn->q->device; dev = pqn->q->device;
BUG_ON(!dev); if (WARN_ON(!dev))
return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process); pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) { if (!pdd) {
...@@ -335,12 +313,9 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, ...@@ -335,12 +313,9 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
int retval; int retval;
struct process_queue_node *pqn; struct process_queue_node *pqn;
BUG_ON(!pqm);
pqn = get_queue_by_qid(pqm, qid); pqn = get_queue_by_qid(pqm, qid);
if (!pqn) { if (!pqn) {
pr_debug("amdkfd: No queue %d exists for update operation\n", pr_debug("No queue %d exists for update operation\n", qid);
qid);
return -EFAULT; return -EFAULT;
} }
...@@ -363,8 +338,6 @@ struct kernel_queue *pqm_get_kernel_queue( ...@@ -363,8 +338,6 @@ struct kernel_queue *pqm_get_kernel_queue(
{ {
struct process_queue_node *pqn; struct process_queue_node *pqn;
BUG_ON(!pqm);
pqn = get_queue_by_qid(pqm, qid); pqn = get_queue_by_qid(pqm, qid);
if (pqn && pqn->kq) if (pqn && pqn->kq)
return pqn->kq; return pqn->kq;
......
...@@ -65,17 +65,15 @@ void print_queue(struct queue *q) ...@@ -65,17 +65,15 @@ void print_queue(struct queue *q)
int init_queue(struct queue **q, const struct queue_properties *properties) int init_queue(struct queue **q, const struct queue_properties *properties)
{ {
struct queue *tmp; struct queue *tmp_q;
BUG_ON(!q); tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
if (!tmp_q)
tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
if (!tmp)
return -ENOMEM; return -ENOMEM;
memcpy(&tmp->properties, properties, sizeof(struct queue_properties)); memcpy(&tmp_q->properties, properties, sizeof(*properties));
*q = tmp; *q = tmp_q;
return 0; return 0;
} }
......
This diff is collapsed.
...@@ -75,12 +75,14 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, ...@@ -75,12 +75,14 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr); uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr); uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id); uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id, unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id); uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
...@@ -482,7 +484,9 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) ...@@ -482,7 +484,9 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
} }
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr) uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm)
{ {
uint32_t wptr_shadow, is_wptr_shadow_valid; uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m; struct cik_mqd *m;
...@@ -636,7 +640,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) ...@@ -636,7 +640,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false; return false;
} }
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id, unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id) uint32_t queue_id)
{ {
...@@ -785,7 +789,8 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, ...@@ -785,7 +789,8 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id, unsigned int watch_point_id,
unsigned int reg_offset) unsigned int reg_offset)
{ {
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]
/ 4;
} }
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid) static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid)
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment