Commit f43dff0e authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-amdkfd-next-fixes-2015-01-25' of...

Merge tag 'drm-amdkfd-next-fixes-2015-01-25' of git://people.freedesktop.org/~gabbayo/linux into drm-next

Here is a pull request of fixes for 3.20 patches, including the fix you asked
me for when you merged the previous pull request.

* tag 'drm-amdkfd-next-fixes-2015-01-25' of git://people.freedesktop.org/~gabbayo/linux:
  drm/amdkfd: change amdkfd version to 0.7.1
  drm/radeon: cik_sdma_ctx_switch_enable() can be static
  drm/amdkfd: Fix sparse errors
  drm/amdkfd: Handle case of invalid queue type
  drm/amdkfd: Add break at the end of case
  drm/amdkfd: Remove negative check of uint variable
parents e37bfa1a f9dcced8
...@@ -141,8 +141,6 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, ...@@ -141,8 +141,6 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
static int set_queue_properties_from_user(struct queue_properties *q_properties, static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args) struct kfd_ioctl_create_queue_args *args)
{ {
void *tmp;
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL; return -EINVAL;
...@@ -180,16 +178,18 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, ...@@ -180,16 +178,18 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EFAULT; return -EFAULT;
} }
tmp = (void *)(uintptr_t)args->eop_buffer_address; if (args->eop_buffer_address &&
if (tmp != NULL && !access_ok(VERIFY_WRITE,
!access_ok(VERIFY_WRITE, tmp, sizeof(uint32_t))) { (const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
pr_debug("kfd: can't access eop buffer"); pr_debug("kfd: can't access eop buffer");
return -EFAULT; return -EFAULT;
} }
tmp = (void *)(uintptr_t)args->ctx_save_restore_address; if (args->ctx_save_restore_address &&
if (tmp != NULL && !access_ok(VERIFY_WRITE,
!access_ok(VERIFY_WRITE, tmp, sizeof(uint32_t))) { (const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
pr_debug("kfd: can't access ctx save restore buffer"); pr_debug("kfd: can't access ctx save restore buffer");
return -EFAULT; return -EFAULT;
} }
......
...@@ -62,12 +62,6 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) ...@@ -62,12 +62,6 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
return KFD_MQD_TYPE_CP; return KFD_MQD_TYPE_CP;
} }
inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.compute_pipe_count;
}
static inline unsigned int get_first_pipe(struct device_queue_manager *dqm) static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
{ {
BUG_ON(!dqm); BUG_ON(!dqm);
...@@ -79,25 +73,6 @@ static inline unsigned int get_pipes_num_cpsch(void) ...@@ -79,25 +73,6 @@ static inline unsigned int get_pipes_num_cpsch(void)
return PIPE_PER_ME_CP_SCHEDULING; return PIPE_PER_ME_CP_SCHEDULING;
} }
inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
uint32_t nybble;
nybble = (pdd->lds_base >> 60) & 0x0E;
return nybble;
}
inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
unsigned int shared_base;
shared_base = (pdd->lds_base >> 16) & 0xFF;
return shared_base;
}
void program_sh_mem_settings(struct device_queue_manager *dqm, void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd) struct qcm_process_device *qpd)
{ {
...@@ -301,6 +276,11 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, ...@@ -301,6 +276,11 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
} }
dqm->sdma_queue_count--; dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id); deallocate_sdma_queue(dqm, q->sdma_id);
} else {
pr_debug("q->properties.type is invalid (%d)\n",
q->properties.type);
retval = -EINVAL;
goto out;
} }
retval = mqd->destroy_mqd(mqd, q->mqd, retval = mqd->destroy_mqd(mqd, q->mqd,
...@@ -331,7 +311,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) ...@@ -331,7 +311,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
BUG_ON(!dqm || !q || !q->mqd); BUG_ON(!dqm || !q || !q->mqd);
mutex_lock(&dqm->lock); mutex_lock(&dqm->lock);
mqd = dqm->ops.get_mqd_manager(dqm, q->properties.type); mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (mqd == NULL) { if (mqd == NULL) {
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
return -ENOMEM; return -ENOMEM;
...@@ -587,7 +568,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, ...@@ -587,7 +568,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm, static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id) unsigned int sdma_queue_id)
{ {
if (sdma_queue_id < 0 || sdma_queue_id >= CIK_SDMA_QUEUES) if (sdma_queue_id >= CIK_SDMA_QUEUES)
return; return;
set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap); set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
} }
...@@ -1114,8 +1095,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) ...@@ -1114,8 +1095,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
switch (dev->device_info->asic_family) { switch (dev->device_info->asic_family) {
case CHIP_CARRIZO: case CHIP_CARRIZO:
device_queue_manager_init_vi(&dqm->ops_asic_specific); device_queue_manager_init_vi(&dqm->ops_asic_specific);
break;
case CHIP_KAVERI: case CHIP_KAVERI:
device_queue_manager_init_cik(&dqm->ops_asic_specific); device_queue_manager_init_cik(&dqm->ops_asic_specific);
break;
} }
if (dqm->ops.initialize(dqm) != 0) { if (dqm->ops.initialize(dqm) != 0) {
......
...@@ -160,10 +160,24 @@ void device_queue_manager_init_cik(struct device_queue_manager_ops *ops); ...@@ -160,10 +160,24 @@ void device_queue_manager_init_cik(struct device_queue_manager_ops *ops);
void device_queue_manager_init_vi(struct device_queue_manager_ops *ops); void device_queue_manager_init_vi(struct device_queue_manager_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm, void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd); struct qcm_process_device *qpd);
inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *qpd);
inline unsigned int get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd);
int init_pipelines(struct device_queue_manager *dqm, int init_pipelines(struct device_queue_manager *dqm,
unsigned int pipes_num, unsigned int first_pipe); unsigned int pipes_num, unsigned int first_pipe);
inline unsigned int get_pipes_num(struct device_queue_manager *dqm);
extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 16) & 0xFF;
}
extern inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 60) & 0x0E;
}
extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.compute_pipe_count;
}
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
...@@ -288,8 +288,11 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, ...@@ -288,8 +288,11 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
switch (dev->device_info->asic_family) { switch (dev->device_info->asic_family) {
case CHIP_CARRIZO: case CHIP_CARRIZO:
kernel_queue_init_vi(&kq->ops_asic_specific); kernel_queue_init_vi(&kq->ops_asic_specific);
break;
case CHIP_KAVERI: case CHIP_KAVERI:
kernel_queue_init_cik(&kq->ops_asic_specific); kernel_queue_init_cik(&kq->ops_asic_specific);
break;
} }
if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
......
...@@ -29,10 +29,10 @@ ...@@ -29,10 +29,10 @@
#define KFD_DRIVER_AUTHOR "AMD Inc. and others" #define KFD_DRIVER_AUTHOR "AMD Inc. and others"
#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs" #define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
#define KFD_DRIVER_DATE "20141113" #define KFD_DRIVER_DATE "20150122"
#define KFD_DRIVER_MAJOR 0 #define KFD_DRIVER_MAJOR 0
#define KFD_DRIVER_MINOR 7 #define KFD_DRIVER_MINOR 7
#define KFD_DRIVER_PATCHLEVEL 0 #define KFD_DRIVER_PATCHLEVEL 1
const struct kfd2kgd_calls *kfd2kgd; const struct kfd2kgd_calls *kfd2kgd;
static const struct kgd2kfd_calls kgd2kfd = { static const struct kgd2kfd_calls kgd2kfd = {
......
...@@ -290,7 +290,7 @@ static void cik_sdma_rlc_stop(struct radeon_device *rdev) ...@@ -290,7 +290,7 @@ static void cik_sdma_rlc_stop(struct radeon_device *rdev)
* *
* Halt or unhalt the async dma engines (CIK). * Halt or unhalt the async dma engines (CIK).
*/ */
void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable) static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
{ {
uint32_t reg_offset, value; uint32_t reg_offset, value;
int i; int i;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment