Commit 19f470b2 authored by Dave Airlie

Merge tag 'drm-amdkfd-next-2017-10-18' of git://people.freedesktop.org/~gabbayo/linux into drm-next

This is the amdkfd pull request for the 4.15 merge window.
The patches here are relevant only to Kaveri and Carrizo; there are still no dGPU patches.

The main goal is to continue alignment with the internal AMD development tree.

The following is a summary of the changes:
- Improvements and fixes to suspend/resume code
- Improvements and fixes to process termination code
- Cleanups in the queue unmapping functionality
- Reuse code from amdgpu

* tag 'drm-amdkfd-next-2017-10-18' of git://people.freedesktop.org/~gabbayo/linux:
  drm/amdkfd: Improve multiple SDMA queues support per process
  drm/amdkfd: Limit queue number per process and device to 127
  drm/amdkfd: Clean up process queue management
  drm/amdkfd: Compress unnecessary function parameters
  drm/amdkfd: Improve process termination handling
  drm/amdkfd: Avoid submitting an unnecessary packet to HWS
  drm/amdkfd: Fix MQD updates
  drm/amdkfd: Pass filter params to unmap_queues_cpsch
  drm/amdkfd: move locking outside of unmap_queues_cpsch
  drm/amdkfd: Avoid name confusion involved in queue unmapping
  drm/amdkfd: Drop _nocpsch suffix from shared functions
  drm/amdkfd: Reuse CHIP_* from amdgpu v2
  drm/amdkfd: Use VMID bitmap from KGD v2
  drm/amdkfd: Adjust dequeue latencies and timeouts
  drm/amdkfd: Rectify the jiffies calculation error with milliseconds v2
  drm/amdkfd: Fix suspend/resume issue on Carrizo v2
  drm/amdkfd: Reorganize kfd resume code
parents 282dc832 e139cd2a
@@ -282,8 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 			p->pasid,
 			dev->id);
 
-	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
-				0, q_properties.type, &queue_id);
+	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
 	if (err != 0)
 		goto err_create_queue;
...
@@ -184,9 +184,10 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 	struct kernel_queue *kq = NULL;
 	int status;
 
+	properties.type = KFD_QUEUE_TYPE_DIQ;
+
 	status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
-				&properties, 0, KFD_QUEUE_TYPE_DIQ,
-				&qid);
+				&properties, &qid);
 
 	if (status) {
 		pr_err("Failed to create DIQ\n");
@@ -769,13 +770,8 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
 	union GRBM_GFX_INDEX_BITS reg_gfx_index;
 	struct kfd_process_device *pdd;
 	struct dbg_wave_control_info wac_info;
-	int temp;
-	int first_vmid_to_scan = 8;
-	int last_vmid_to_scan = 15;
-
-	first_vmid_to_scan = ffs(dev->shared_resources.compute_vmid_bitmap) - 1;
-	temp = dev->shared_resources.compute_vmid_bitmap >> first_vmid_to_scan;
-	last_vmid_to_scan = first_vmid_to_scan + ffz(temp);
+	int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
+	int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
 
 	reg_sq_cmd.u32All = 0;
 	status = 0;
...
@@ -92,6 +92,8 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 				unsigned int chunk_size);
 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
 
+static int kfd_resume(struct kfd_dev *kfd);
+
 static const struct kfd_device_info *lookup_device_info(unsigned short did)
 {
 	size_t i;
@@ -169,15 +171,8 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 		(unsigned int)(1 << kfd->device_info->max_pasid_bits),
 		iommu_info.max_pasids);
 
-	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
-	if (err < 0) {
-		dev_err(kfd_device, "error initializing iommu device\n");
-		return false;
-	}
-
 	if (!kfd_set_pasid_limit(pasid_limit)) {
 		dev_err(kfd_device, "error setting pasid limit\n");
-		amd_iommu_free_device(kfd->pdev);
 		return false;
 	}
@@ -189,7 +184,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
 	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
 
 	if (dev)
-		kfd_unbind_process_from_device(dev, pasid);
+		kfd_process_iommu_unbind_callback(dev, pasid);
 }
 
 /*
@@ -224,6 +219,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->shared_resources = *gpu_resources;
 
+	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+			- kfd->vm_info.first_vmid_kfd + 1;
+
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
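For a concrete sense of the new vm_info fields: on Kaveri/Carrizo the KGD typically hands KFD a compute_vmid_bitmap of 0xFF00 (VMIDs 8-15 reserved for compute), so ffs() returns 9 and fls() returns 16, giving:

	first_vmid_kfd = ffs(0xFF00) - 1 = 8
	last_vmid_kfd  = fls(0xFF00) - 1 = 15
	vmid_num_kfd   = 15 - 8 + 1      = 8

The 0xFF00 mask is illustrative of the usual KGD/KFD split, not a value taken from this diff.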
@@ -273,29 +273,22 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto kfd_interrupt_error;
 	}
 
-	if (!device_iommu_pasid_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing iommuv2 for device %x:%x\n",
-			kfd->pdev->vendor, kfd->pdev->device);
-		goto device_iommu_pasid_error;
-	}
-	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
-			iommu_pasid_shutdown_callback);
-	amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-
 	kfd->dqm = device_queue_manager_init(kfd);
 	if (!kfd->dqm) {
 		dev_err(kfd_device, "Error initializing queue manager\n");
 		goto device_queue_manager_error;
 	}
 
-	if (kfd->dqm->ops.start(kfd->dqm)) {
+	if (!device_iommu_pasid_init(kfd)) {
 		dev_err(kfd_device,
-			"Error starting queue manager for device %x:%x\n",
+			"Error initializing iommuv2 for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
-		goto dqm_start_error;
+		goto device_iommu_pasid_error;
 	}
 
+	if (kfd_resume(kfd))
+		goto kfd_resume_error;
+
 	kfd->dbgmgr = NULL;
 
 	kfd->init_complete = true;
@@ -307,11 +300,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	goto out;
 
-dqm_start_error:
+kfd_resume_error:
+device_iommu_pasid_error:
 	device_queue_manager_uninit(kfd->dqm);
 device_queue_manager_error:
-	amd_iommu_free_device(kfd->pdev);
-device_iommu_pasid_error:
 	kfd_interrupt_exit(kfd);
 kfd_interrupt_error:
 	kfd_topology_remove_device(kfd);
@@ -331,8 +323,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 	if (kfd->init_complete) {
+		kgd2kfd_suspend(kfd);
 		device_queue_manager_uninit(kfd->dqm);
-		amd_iommu_free_device(kfd->pdev);
 		kfd_interrupt_exit(kfd);
 		kfd_topology_remove_device(kfd);
 		kfd_doorbell_fini(kfd);
@@ -345,35 +337,59 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 void kgd2kfd_suspend(struct kfd_dev *kfd)
 {
-	if (kfd->init_complete) {
-		kfd->dqm->ops.stop(kfd->dqm);
-		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
-		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
-		amd_iommu_free_device(kfd->pdev);
-	}
+	if (!kfd->init_complete)
+		return;
+
+	kfd->dqm->ops.stop(kfd->dqm);
+
+	kfd_unbind_processes_from_device(kfd);
+
+	amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+	amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+	amd_iommu_free_device(kfd->pdev);
 }
 
 int kgd2kfd_resume(struct kfd_dev *kfd)
 {
-	unsigned int pasid_limit;
-	int err;
-
-	pasid_limit = kfd_get_pasid_limit();
-
-	if (kfd->init_complete) {
-		err = amd_iommu_init_device(kfd->pdev, pasid_limit);
-		if (err < 0) {
-			dev_err(kfd_device, "failed to initialize iommu\n");
-			return -ENXIO;
-		}
-
-		amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
-						iommu_pasid_shutdown_callback);
-		amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-		kfd->dqm->ops.start(kfd->dqm);
-	}
+	if (!kfd->init_complete)
+		return 0;
 
-	return 0;
+	return kfd_resume(kfd);
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+	int err = 0;
+	unsigned int pasid_limit = kfd_get_pasid_limit();
+
+	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+	if (err)
+		return -ENXIO;
+	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+					iommu_pasid_shutdown_callback);
+	amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+				     iommu_invalid_ppr_cb);
+
+	err = kfd_bind_processes_to_device(kfd);
+	if (err)
+		goto processes_bind_error;
+
+	err = kfd->dqm->ops.start(kfd->dqm);
+	if (err) {
+		dev_err(kfd_device,
+			"Error starting queue manager for device %x:%x\n",
+			kfd->pdev->vendor, kfd->pdev->device);
+		goto dqm_start_error;
+	}
+
+	return err;
+
+dqm_start_error:
+processes_bind_error:
+	amd_iommu_free_device(kfd->pdev);
+
+	return err;
 }
 
 /* This is called directly from KGD at ISR. */
...
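The net effect of this file's changes: cold init (kgd2kfd_device_init) and system resume (kgd2kfd_resume) now funnel through the same kfd_resume() helper, so both paths bring the device up in one order: amd_iommu_init_device(), register the IOMMU callbacks, rebind suspended processes, then start the DQM. kgd2kfd_suspend() tears the same state down in reverse, and the fall-through error labels in kfd_resume() ensure a bind or DQM-start failure releases the IOMMU device that was just initialized.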
@@ -29,11 +29,9 @@
 #include "kfd_priv.h"
 #include "kfd_mqd_manager.h"
 
-#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
-#define CIK_VMID_NUM (8)
-#define KFD_VMID_START_OFFSET (8)
-#define VMID_PER_DEVICE CIK_VMID_NUM
 #define KFD_DQM_FIRST_PIPE (0)
+#define KFD_UNMAP_LATENCY_MS (4000)
+#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
 #define CIK_SDMA_QUEUES (4)
 #define CIK_SDMA_QUEUES_PER_ENGINE (2)
 #define CIK_SDMA_ENGINE_NUM (2)
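With KFD_UNMAP_LATENCY_MS at 4000, the derived timeout works out to 2 * 4000 + 1000 = 9000 ms, replacing the old flat 500 ms; the formula appears to budget for two full unmap latencies plus a fixed margin (that reading is inferred from the expression, not stated in the series).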
@@ -79,6 +77,8 @@ struct device_process_node {
  * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
  * memory apertures.
  *
+ * @process_termination: Clears all process queues belongs to that device.
+ *
  */
 
 struct device_queue_manager_ops {
@@ -122,6 +122,9 @@ struct device_queue_manager_ops {
 				   enum cache_policy alternate_policy,
 				   void __user *alternate_aperture_base,
 				   uint64_t alternate_aperture_size);
+
+	int	(*process_termination)(struct device_queue_manager *dqm,
+			struct qcm_process_device *qpd);
 };
 
 struct device_queue_manager_asic_ops {
...
@@ -185,7 +185,7 @@ static void uninitialize(struct kernel_queue *kq)
 		kq->mqd->destroy_mqd(kq->mqd,
 					kq->queue->mqd,
 					KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
-					QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+					KFD_UNMAP_LATENCY_MS,
 					kq->queue->pipe,
 					kq->queue->queue);
 	else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
@@ -303,14 +303,20 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	case CHIP_KAVERI:
 		kernel_queue_init_cik(&kq->ops_asic_specific);
 		break;
+	default:
+		WARN(1, "Unexpected ASIC family %u",
+		     dev->device_info->asic_family);
+		goto out_free;
 	}
 
-	if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
-		pr_err("Failed to init kernel queue\n");
-		kfree(kq);
-		return NULL;
-	}
-	return kq;
+	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+		return kq;
+
+	pr_err("Failed to init kernel queue\n");
+
+out_free:
+	kfree(kq);
+	return NULL;
 }
 
 void kernel_queue_uninit(struct kernel_queue *kq)
...
@@ -31,6 +31,9 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 		return mqd_manager_init_cik(type, dev);
 	case CHIP_CARRIZO:
 		return mqd_manager_init_vi(type, dev);
+	default:
+		WARN(1, "Unexpected ASIC family %u",
+		     dev->device_info->asic_family);
 	}
 
 	return NULL;
...
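Both factory functions now warn loudly on an unknown ASIC instead of silently falling through. Callers are expected to cope with the NULL return; a hypothetical check (not a hunk from this series) would look like:

	struct mqd_manager *mqd;

	mqd = mqd_manager_init(KFD_MQD_TYPE_CP, dev);
	if (!mqd)	/* unknown ASIC family or allocation failure */
		return -ENOMEM;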
@@ -140,8 +140,6 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
 				struct qcm_process_device *qpd)
 {
 	struct pm4_mes_map_process *packet;
-	struct queue *cur;
-	uint32_t num_queues;
 
 	packet = (struct pm4_mes_map_process *)buffer;
@@ -156,10 +154,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
 	packet->bitfields10.gds_size = qpd->gds_size;
 	packet->bitfields10.num_gws = qpd->num_gws;
 	packet->bitfields10.num_oac = qpd->num_oac;
-	num_queues = 0;
-	list_for_each_entry(cur, &qpd->queues_list, list)
-		num_queues++;
-	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;
+	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
 
 	packet->sh_mem_config = qpd->sh_mem_config;
 	packet->sh_mem_bases = qpd->sh_mem_bases;
@@ -208,7 +203,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 				queue_type__mes_map_queues__debug_interface_queue_vi;
 		break;
 	case KFD_QUEUE_TYPE_SDMA:
-		packet->bitfields2.engine_sel =
+		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
 				engine_sel__mes_map_queues__sdma0_vi;
 		use_static = false; /* no static queues under SDMA */
 		break;
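The engine now comes from the queue itself: with the two CIK SDMA engines, sdma_engine_id 0 yields engine_sel__mes_map_queues__sdma0_vi, and sdma_engine_id 1 presumably selects the following enumerator for the second engine. This is what lets the SDMA queues of one process land on distinct engines, per the "Improve multiple SDMA queues support per process" patch.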
@@ -376,7 +371,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 	packet->bitfields2.queue_type =
 			queue_type__mes_set_resources__hsa_interface_queue_hiq;
 	packet->bitfields2.vmid_mask = res->vmid_mask;
-	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
+	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
 	packet->bitfields7.oac_mask = res->oac_mask;
 	packet->bitfields8.gds_heap_base = res->gds_heap_base;
 	packet->bitfields8.gds_heap_size = res->gds_heap_size;
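The old bare constant KFD_UNMAP_LATENCY (150) is gone; dividing the new millisecond value by 100, i.e. 4000 / 100 = 40, suggests the SET_RESOURCES unmap_latency field is encoded in roughly 100 ms units, so the HWS is now told about 4 s of latency instead of the previous ~15 s (the unit interpretation is inferred from the arithmetic, not documented in the diff).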
@@ -476,7 +471,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 }
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
-			enum kfd_preempt_type_filter mode,
+			enum kfd_unmap_queues_filter filter,
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine)
 {
@@ -494,8 +489,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	packet = (struct pm4_mes_unmap_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
 
-	pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
-		mode, reset, type);
+	pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
+		filter, reset, type);
 	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
 					sizeof(struct pm4_mes_unmap_queues));
 	switch (type) {
@@ -521,29 +516,29 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 		packet->bitfields2.action =
 				action__mes_unmap_queues__preempt_queues;
 
-	switch (mode) {
-	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+	switch (filter) {
+	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
 		packet->bitfields2.num_queues = 1;
 		packet->bitfields3b.doorbell_offset0 = filter_param;
 		break;
-	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
 		packet->bitfields3a.pasid = filter_param;
 		break;
-	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__unmap_all_queues;
 		break;
-	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
+	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
 		/* in this case, we do not preempt static queues */
 		packet->bitfields2.queue_sel =
 			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
 		break;
 	default:
-		WARN(1, "filter %d", mode);
+		WARN(1, "filter %d", filter);
 		retval = -EINVAL;
 		goto err_invalid;
 	}
...
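For orientation, a caller-side sketch of the renamed interface (hypothetical code, not a hunk from this series; the real in-tree caller is unmap_queues_cpsch() in kfd_device_queue_manager.c):

	/* Preempt every queue owned by one process, without a wavefront reset. */
	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
				     KFD_UNMAP_QUEUES_FILTER_BY_PASID,
				     p->pasid, false, 0);
	if (retval)
		pr_err("Failed to unmap queues of pasid %d\n", p->pasid);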
@@ -33,6 +33,8 @@
 #include <linux/kfd_ioctl.h>
 #include <kgd_kfd_interface.h>
 
+#include "amd_shared.h"
+
 #define KFD_SYSFS_FILE_MODE 0444
 
 #define KFD_MMAP_DOORBELL_MASK 0x8000000000000
@@ -112,11 +114,6 @@ enum cache_policy {
 	cache_policy_noncoherent
 };
 
-enum asic_family_type {
-	CHIP_KAVERI = 0,
-	CHIP_CARRIZO
-};
-
 struct kfd_event_interrupt_class {
 	bool (*interrupt_isr)(struct kfd_dev *dev,
 			const uint32_t *ih_ring_entry);
@@ -125,7 +122,7 @@ struct kfd_event_interrupt_class {
 };
 
 struct kfd_device_info {
-	unsigned int asic_family;
+	enum amd_asic_type asic_family;
 	const struct kfd_event_interrupt_class *event_interrupt_class;
 	unsigned int max_pasid_bits;
 	unsigned int max_no_of_hqd;
@@ -141,6 +138,12 @@ struct kfd_mem_obj {
 	uint32_t *cpu_ptr;
 };
 
+struct kfd_vmid_info {
+	uint32_t first_vmid_kfd;
+	uint32_t last_vmid_kfd;
+	uint32_t vmid_num_kfd;
+};
+
 struct kfd_dev {
 	struct kgd_dev *kgd;
@@ -162,6 +165,7 @@ struct kfd_dev {
 	 */
 	struct kgd2kfd_shared_resources shared_resources;
+	struct kfd_vmid_info vm_info;
 
 	const struct kfd2kgd_calls *kfd2kgd;
 	struct mutex doorbell_mutex;
@@ -218,22 +222,22 @@ void kfd_chardev_exit(void);
 struct device *kfd_chardev(void);
 
 /**
- * enum kfd_preempt_type_filter
+ * enum kfd_unmap_queues_filter
  *
- * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
+ * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
  *
- * @KFD_PRERMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
  * running queues list.
  *
- * @KFD_PRERMPT_TYPE_FILTER_BY_PASID: Preempts queues that belongs to
+ * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belongs to
  * specific process.
  *
  */
-enum kfd_preempt_type_filter {
-	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
-	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
-	KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,
-	KFD_PREEMPT_TYPE_FILTER_BY_PASID
+enum kfd_unmap_queues_filter {
+	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
+	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+	KFD_UNMAP_QUEUES_FILTER_BY_PASID
 };
 
 /**
@@ -401,7 +405,6 @@ struct scheduling_resources {
 struct process_queue_manager {
 	/* data */
 	struct kfd_process	*process;
-	unsigned int		num_concurrent_processes;
 	struct list_head	queues;
 	unsigned long		*queue_slot_bitmap;
 };
@@ -417,6 +420,12 @@ struct qcm_process_device {
 	unsigned int queue_count;
 	unsigned int vmid;
 	bool is_debug;
+
+	/* This flag tells if we should reset all wavefronts on
+	 * process termination
+	 */
+	bool reset_wavefronts;
+
 	/*
 	 * All the memory management data should be here too
 	 */
@@ -432,6 +441,13 @@ struct qcm_process_device {
 	uint32_t sh_hidden_private_base;
 };
 
+enum kfd_pdd_bound {
+	PDD_UNBOUND = 0,
+	PDD_BOUND,
+	PDD_BOUND_SUSPENDED,
+};
+
 /* Data that is per-process-per device. */
 struct kfd_process_device {
@@ -443,6 +459,8 @@ struct kfd_process_device {
 	/* The device that owns this data. */
 	struct kfd_dev *dev;
 
+	/* The process that owns this kfd_process_device. */
+	struct kfd_process *process;
+
 	/* per-process-per device QCM data structure */
 	struct qcm_process_device qpd;
@@ -456,12 +474,14 @@ struct kfd_process_device {
 	uint64_t scratch_limit;
 
 	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
-	bool bound;
+	enum kfd_pdd_bound bound;
 
-	/* This flag tells if we should reset all
-	 * wavefronts on process termination
+	/* Flag used to tell the pdd has dequeued from the dqm.
+	 * This is used to prevent dev->dqm->ops.process_termination() from
+	 * being called twice when it is already called in IOMMU callback
+	 * function.
 	 */
-	bool reset_wavefronts;
+	bool already_dequeued;
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -547,8 +567,10 @@ struct kfd_process *kfd_get_process(const struct task_struct *);
 struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
 
 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
-							struct kfd_process *p);
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
+						struct kfd_process *p);
+int kfd_bind_processes_to_device(struct kfd_dev *dev);
+void kfd_unbind_processes_from_device(struct kfd_dev *dev);
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
 struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
 						struct kfd_process *p);
 struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -646,14 +668,14 @@ struct process_queue_node {
 	struct list_head process_queue_list;
 };
 
+void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
+void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
+
 int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
 void pqm_uninit(struct process_queue_manager *pqm);
 int pqm_create_queue(struct process_queue_manager *pqm,
 			struct kfd_dev *dev,
 			struct file *f,
 			struct queue_properties *properties,
-			unsigned int flags,
-			enum kfd_queue_type type,
 			unsigned int *qid);
 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
 int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
@@ -663,15 +685,12 @@ struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
 						unsigned int qid);
 
 int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 				unsigned int fence_value,
-				unsigned long timeout);
+				unsigned int timeout_ms);
 
 /* Packet Manager */
 
-#define KFD_HIQ_TIMEOUT (500)
 #define KFD_FENCE_COMPLETED (100)
 #define KFD_FENCE_INIT (10)
-#define KFD_UNMAP_LATENCY (150)
 
 struct packet_manager {
 	struct device_queue_manager *dqm;
@@ -690,7 +709,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 			uint32_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
-			enum kfd_preempt_type_filter mode,
+			enum kfd_unmap_queues_filter mode,
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine);
...
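The switch from unsigned long timeout (raw jiffies) to unsigned int timeout_ms pairs with the "Rectify the jiffies calculation error with milliseconds" patch. Inside amdkfd_fence_wait_timeout() the deadline computation presumably becomes something like this sketch (msecs_to_jiffies() and time_after() are standard kernel helpers; the exact body is not shown in this diff):

	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		schedule();
	}

With the old signature, a millisecond count could be passed where jiffies were expected; on a HZ=250 kernel that stretches every intended millisecond to four.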
@@ -171,12 +171,10 @@ static void kfd_process_wq_release(struct work_struct *work)
 		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
 				pdd->dev->id, p->pasid);
 
-		if (pdd->reset_wavefronts)
-			dbgdev_wave_reset_wavefronts(pdd->dev, p);
-
-		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+		if (pdd->bound == PDD_BOUND)
+			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
 
 		list_del(&pdd->per_device_list);
 		kfree(pdd);
@@ -236,24 +234,17 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 	mutex_lock(&p->mutex);
 
+	/* In case our notifier is called before IOMMU notifier */
+	kfd_process_dequeue_from_all_devices(p);
+
 	pqm_uninit(&p->pqm);
 
 	/* Iterate over all process device data structure and check
-	 * if we should delete debug managers and reset all wavefronts
+	 * if we should delete debug managers
 	 */
-	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
 		if ((pdd->dev->dbgmgr) &&
 				(pdd->dev->dbgmgr->pasid == p->pasid))
 			kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
 
-		if (pdd->reset_wavefronts) {
-			pr_warn("Resetting all wave fronts\n");
-			dbgdev_wave_reset_wavefronts(pdd->dev, p);
-			pdd->reset_wavefronts = false;
-		}
-	}
-
 	mutex_unlock(&p->mutex);
 
 	/*
@@ -351,9 +342,9 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
 	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
 		if (pdd->dev == dev)
-			break;
+			return pdd;
 
-	return pdd;
+	return NULL;
 }
 
 struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -367,7 +358,9 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 		INIT_LIST_HEAD(&pdd->qpd.queues_list);
 		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
 		pdd->qpd.dqm = dev->dqm;
-		pdd->reset_wavefronts = false;
+		pdd->process = p;
+		pdd->bound = PDD_UNBOUND;
+		pdd->already_dequeued = false;
 		list_add(&pdd->per_device_list, &p->per_device_data);
 	}
@@ -393,19 +386,91 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	if (pdd->bound)
+	if (pdd->bound == PDD_BOUND) {
 		return pdd;
+	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
+		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
+		return ERR_PTR(-EINVAL);
+	}
 
 	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
 	if (err < 0)
 		return ERR_PTR(err);
 
-	pdd->bound = true;
+	pdd->bound = PDD_BOUND;
 
 	return pdd;
 }
 
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+/*
+ * Bind processes do the device that have been temporarily unbound
+ * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
+ */
+int kfd_bind_processes_to_device(struct kfd_dev *dev)
+{
+	struct kfd_process_device *pdd;
+	struct kfd_process *p;
+	unsigned int temp;
+	int err = 0;
+
+	int idx = srcu_read_lock(&kfd_processes_srcu);
+
+	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+		mutex_lock(&p->mutex);
+		pdd = kfd_get_process_device_data(dev, p);
+		if (pdd->bound != PDD_BOUND_SUSPENDED) {
+			mutex_unlock(&p->mutex);
+			continue;
+		}
+
+		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
+				p->lead_thread);
+		if (err < 0) {
+			pr_err("unexpected pasid %d binding failure\n",
+					p->pasid);
+			mutex_unlock(&p->mutex);
+			break;
+		}
+
+		pdd->bound = PDD_BOUND;
+		mutex_unlock(&p->mutex);
+	}
+
+	srcu_read_unlock(&kfd_processes_srcu, idx);
+
+	return err;
+}
+
+/*
+ * Temporarily unbind currently bound processes from the device and
+ * mark them as PDD_BOUND_SUSPENDED. These processes will be restored
+ * to PDD_BOUND state in kfd_bind_processes_to_device.
+ */
+void kfd_unbind_processes_from_device(struct kfd_dev *dev)
+{
+	struct kfd_process_device *pdd;
+	struct kfd_process *p;
+	unsigned int temp, temp_bound, temp_pasid;
+
+	int idx = srcu_read_lock(&kfd_processes_srcu);
+
+	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+		mutex_lock(&p->mutex);
+		pdd = kfd_get_process_device_data(dev, p);
+		temp_bound = pdd->bound;
+		temp_pasid = p->pasid;
+		if (pdd->bound == PDD_BOUND)
+			pdd->bound = PDD_BOUND_SUSPENDED;
+		mutex_unlock(&p->mutex);
+
+		if (temp_bound == PDD_BOUND)
+			amd_iommu_unbind_pasid(dev->pdev, temp_pasid);
+	}
+
+	srcu_read_unlock(&kfd_processes_srcu, idx);
+}
+
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
 {
 	struct kfd_process *p;
 	struct kfd_process_device *pdd;
@@ -424,28 +489,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 	if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
 		kfd_dbgmgr_destroy(dev->dbgmgr);
 
-	pqm_uninit(&p->pqm);
-
 	pdd = kfd_get_process_device_data(dev, p);
-
-	if (!pdd) {
-		mutex_unlock(&p->mutex);
-		return;
-	}
-
-	if (pdd->reset_wavefronts) {
-		dbgdev_wave_reset_wavefronts(pdd->dev, p);
-		pdd->reset_wavefronts = false;
-	}
-
-	/*
-	 * Just mark pdd as unbound, because we still need it
-	 * to call amd_iommu_unbind_pasid() in when the
-	 * process exits.
-	 * We don't call amd_iommu_unbind_pasid() here
-	 * because the IOMMU called us.
-	 */
-	pdd->bound = false;
+	if (pdd)
+		/* For GPU relying on IOMMU, we need to dequeue here
+		 * when PASID is still bound.
+		 */
+		kfd_process_dequeue_from_device(pdd);
 
 	mutex_unlock(&p->mutex);
 }
...
@@ -63,6 +63,25 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
 	return 0;
 }
 
+void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+{
+	struct kfd_dev *dev = pdd->dev;
+
+	if (pdd->already_dequeued)
+		return;
+
+	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+	pdd->already_dequeued = true;
+}
+
+void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
+{
+	struct kfd_process_device *pdd;
+
+	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+		kfd_process_dequeue_from_device(pdd);
+}
+
 int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 {
 	INIT_LIST_HEAD(&pqm->queues);
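Note the idempotency: already_dequeued guarantees process_termination() runs at most once per device, regardless of whether the IOMMU unbind callback or the MMU-notifier release path gets there first; that is the exact double call the new comment in kfd_priv.h warns about.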
@@ -78,21 +97,14 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 void pqm_uninit(struct process_queue_manager *pqm)
 {
-	int retval;
 	struct process_queue_node *pqn, *next;
 
 	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
-		retval = pqm_destroy_queue(
-				pqm,
-				(pqn->q != NULL) ?
-					pqn->q->properties.queue_id :
-					pqn->kq->queue->properties.queue_id);
-
-		if (retval != 0) {
-			pr_err("failed to destroy queue\n");
-			return;
-		}
+		uninit_queue(pqn->q);
+		list_del(&pqn->process_queue_list);
+		kfree(pqn);
 	}
+
 	kfree(pqm->queue_slot_bitmap);
 	pqm->queue_slot_bitmap = NULL;
 }
@@ -130,20 +142,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 			struct kfd_dev *dev,
 			struct file *f,
 			struct queue_properties *properties,
-			unsigned int flags,
-			enum kfd_queue_type type,
 			unsigned int *qid)
 {
 	int retval;
 	struct kfd_process_device *pdd;
-	struct queue_properties q_properties;
 	struct queue *q;
 	struct process_queue_node *pqn;
 	struct kernel_queue *kq;
-	int num_queues = 0;
-	struct queue *cur;
+	enum kfd_queue_type type = properties->type;
+	unsigned int max_queues = 127; /* HWS limit */
 
-	memcpy(&q_properties, properties, sizeof(struct queue_properties));
 	q = NULL;
 	kq = NULL;
@@ -159,13 +167,11 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	 * If we are just about to create DIQ, the is_debug flag is not set yet
 	 * Hence we also check the type as well
 	 */
-	if ((pdd->qpd.is_debug) ||
-	    (type == KFD_QUEUE_TYPE_DIQ)) {
-		list_for_each_entry(cur, &pdd->qpd.queues_list, list)
-			num_queues++;
-		if (num_queues >= dev->device_info->max_no_of_hqd/2)
-			return -ENOSPC;
-	}
+	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
+		max_queues = dev->device_info->max_no_of_hqd/2;
+
+	if (pdd->qpd.queue_count >= max_queues)
+		return -ENOSPC;
 
 	retval = find_available_queue_slot(pqm, qid);
 	if (retval != 0)
@@ -187,14 +193,14 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	case KFD_QUEUE_TYPE_COMPUTE:
 		/* check if there is over subscription */
 		if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
-		((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
+		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
 		(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
 			pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
 			retval = -EPERM;
 			goto err_create_queue;
 		}
 
-		retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
+		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
 		pqn->q = q;
@@ -231,9 +237,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	list_add(&pqn->process_queue_list, &pqm->queues);
 
 	if (q) {
-		*properties = q->properties;
 		pr_debug("PQM done creating queue\n");
-		print_queue_properties(properties);
+		print_queue_properties(&q->properties);
 	}
 
 	return retval;
@@ -290,9 +295,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 	if (pqn->q) {
 		dqm = pqn->q->device->dqm;
 		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
-		if (retval != 0)
-			return retval;
-
 		uninit_queue(pqn->q);
 	}
...