Commit c59c961c authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This feels larger than I'd like but its for three reasons.

   a) amdkfd finalising the API more, this is a new feature introduced
      last merge window, and I'd prefer to make the tweaks to the API
      before it first gets into a stable release.

   b) radeon regression required splitting an internal API to fix
      properly, so it just changed a few more lines

   c) vmwgfx fix changes a lock from a mutex to a spin lock; this is
      fallout from the new sleep checking.

  Otherwise there are just some tda998x fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: Remove rdev->gart.pages_addr array
  drm/radeon: Restore GART table contents after pinning it in VRAM v3
  drm/radeon: Split off gart_get_page_entry ASIC hook from set_page_entry
  drm/amdkfd: Fix bug in call to init_pipelines()
  drm/amdkfd: Fix bug in pipelines initialization
  drm/radeon: Don't increment pipe_id in kgd_init_pipeline
  drm/i2c: tda998x: set the CEC I2C address based on the slave I2C address
  drm/vmwgfx: Replace the hw mutex with a hw spinlock
  drm/amdkfd: Allow user to limit only queues per device
  drm/amdkfd: PQM handle queue creation fault
  drm: tda998x: Fix EDID read timeout on HDMI connect
  drm: tda998x: Protect the page register
parents 59343cd7 5159571c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"

 #define MQD_SIZE_ALIGNED 768
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->shared_resources = *gpu_resources;

        /* calculate max size of mqds needed for queues */
-       size = max_num_of_processes *
-                       max_num_of_queues_per_process *
-                       kfd->device_info->mqd_size_aligned;
+       size = max_num_of_queues_per_device *
+                       kfd->device_info->mqd_size_aligned;

        /* add another 512KB for all other allocations on gart */
        size += 512 * 1024;
...
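Worked out with the defaults this series introduces (4096 queues per device, MQD_SIZE_ALIGNED = 768), the reservation is 4096 * 768 bytes = 3 MB of GART plus the extra 512 KB. The old formula reserved exactly the same amount by default (32 processes * 128 queues * 768 bytes), so the change alters what the limit expresses, not the default memory footprint.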
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,

        mutex_lock(&dqm->lock);

+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
        if (list_empty(&qpd->queues_list)) {
                retval = allocate_vmid(dqm, qpd, q);
                if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,

        list_add(&q->list, &qpd->queues_list);
        dqm->queue_count++;
+
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
        return 0;
 }
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
        if (list_empty(&qpd->queues_list))
                deallocate_vmid(dqm, qpd, q);
        dqm->queue_count--;
+
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,

        for (i = 0; i < pipes_num; i++) {
                inx = i + first_pipe;
+               /*
+                * HPD buffer on GTT is allocated by amdkfd, no need to waste
+                * space in GTT for pipelines we don't initialize
+                */
                pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
                pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
                /* = log2(bytes/4)-1 */
-               kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+               kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
                                CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
        }
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)

        pr_debug("kfd: In %s\n", __func__);

-       retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+       retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
        if (retval != 0)
                return retval;
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,

        pr_debug("kfd: In func %s\n", __func__);

        mutex_lock(&dqm->lock);
+
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        list_add(&kq->list, &qpd->priv_queue_list);
        dqm->queue_count++;
        qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
        dqm->queue_count--;
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, false);
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
        mutex_unlock(&dqm->lock);
 }
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,

        mutex_lock(&dqm->lock);

+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               retval = -EPERM;
+               goto out;
+       }
+
        mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
        if (mqd == NULL) {
                mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                retval = execute_queues_cpsch(dqm, false);
        }

+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,

        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
        return 0;
...
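All four create/destroy paths in the device queue manager above apply one accounting pattern. A minimal sketch of that pattern, with the queue setup itself elided and using only names that appear in the diff:

        mutex_lock(&dqm->lock);
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                mutex_unlock(&dqm->lock);       /* device-wide cap reached */
                return -EPERM;
        }
        /* ... allocate vmid/mqd, attach the queue to its lists ... */
        dqm->total_queue_count++;       /* counts every queue type, active or not */
        mutex_unlock(&dqm->lock);

Because total_queue_count is only ever read and written under dqm->lock, the cap check and the increment are atomic with respect to each other.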
@@ -130,6 +130,7 @@ struct device_queue_manager {
        struct list_head queues;
        unsigned int processes_count;
        unsigned int queue_count;
+       unsigned int total_queue_count;
        unsigned int next_pipe_to_allocate;
        unsigned int *allocated_queues;
        unsigned int vmid_bitmap;
...
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
        "Kernel cmdline parameter that defines the amdkfd scheduling policy");

-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+       "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");

 bool kgd2kfd_init(unsigned interface_version,
                  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
 }

        /* Verify module parameters */
-       if ((max_num_of_processes < 0) ||
-               (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-               pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-               return -1;
-       }
-
-       if ((max_num_of_queues_per_process < 0) ||
-               (max_num_of_queues_per_process >
-               KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-               pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+       if ((max_num_of_queues_per_device < 0) ||
+               (max_num_of_queues_per_device >
+               KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+               pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
                return -1;
        }
...
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);

 int kfd_pasid_init(void)
 {
-       pasid_limit = max_num_of_processes;
+       pasid_limit = KFD_MAX_NUM_OF_PROCESSES;

        pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
        if (!pasid_bitmap)
...
@@ -52,20 +52,19 @@
 #define kfd_alloc_struct(ptr_to_struct) \
        ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;

-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+       (KFD_MAX_NUM_OF_PROCESSES * \
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

 #define KFD_KERNEL_QUEUE_SIZE 2048
...
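For reference, the new compile-time ceiling expands to 512 * 1024 = 524288 queues, so the module parameter's legal range is 0 to 524288 with 4096 as the shipped default, while the per-process bitmap in the process queue manager below stays bounded by the unchanged KFD_MAX_NUM_OF_QUEUES_PER_PROCESS = 1024.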
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
        pr_debug("kfd: in %s\n", __func__);

        found = find_first_zero_bit(pqm->queue_slot_bitmap,
-                       max_num_of_queues_per_process);
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

        pr_debug("kfd: the new slot id %lu\n", found);

-       if (found >= max_num_of_queues_per_process) {
+       if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
                                pqm->process->pasid);
                return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)

        INIT_LIST_HEAD(&pqm->queues);
        pqm->queue_slot_bitmap =
-                       kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+                       kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                        BITS_PER_BYTE), GFP_KERNEL);
        if (pqm->queue_slot_bitmap == NULL)
                return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                pqn->kq = NULL;
                retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
                                                &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
                print_queue(q);
                break;
        case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        }

        if (retval != 0) {
-               pr_err("kfd: error dqm create queue\n");
+               pr_debug("Error dqm create queue\n");
                goto err_create_queue;
        }

@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
        kfree(pqn);
 err_allocate_pqn:
+       /* check if queues list is empty unregister process from device */
        clear_bit(*qid, pqm->queue_slot_bitmap);
+       if (list_empty(&pqm->queues))
+               dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
        return retval;
 }
...
@@ -32,6 +32,8 @@
 struct tda998x_priv {
        struct i2c_client *cec;
        struct i2c_client *hdmi;
+       struct mutex mutex;
+       struct delayed_work dwork;
        uint16_t rev;
        uint8_t current_page;
        int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        uint8_t addr = REG2ADDR(reg);
        int ret;

+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return ret;
+               goto out;

        ret = i2c_master_send(client, &addr, sizeof(addr));
        if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        if (ret < 0)
                goto fail;

-       return ret;
+       goto out;
+
 fail:
        dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
        return ret;
 }
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
        buf[0] = REG2ADDR(reg);
        memcpy(&buf[1], p, cnt);

+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;

        ret = i2c_master_send(client, buf, cnt + 1);
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }

 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
        uint8_t buf[] = {REG2ADDR(reg), val};
        int ret;

+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;

        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }

 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
        uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
        int ret;

+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;

        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }

 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
        reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }

+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct tda998x_priv *priv =
+                       container_of(dwork, struct tda998x_priv, dwork);
+
+       if (priv->encoder && priv->encoder->dev)
+               drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
                        priv->wq_edid_wait = 0;
                        wake_up(&priv->wq_edid);
                } else if (cec != 0) {  /* HPD change */
-                       if (priv->encoder && priv->encoder->dev)
-                               drm_helper_hpd_irq_event(priv->encoder->dev);
+                       schedule_delayed_work(&priv->dwork, HZ/10);
                }
        }
        return IRQ_HANDLED;
 }
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
        reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-       if (priv->hdmi->irq)
+       if (priv->hdmi->irq) {
                free_irq(priv->hdmi->irq, priv);
+               cancel_delayed_work_sync(&priv->dwork);
+       }

        i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        struct device_node *np = client->dev.of_node;
        u32 video;
        int rev_lo, rev_hi, ret;
+       unsigned short cec_addr;

        priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
        priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)

        priv->current_page = 0xff;
        priv->hdmi = client;
-       priv->cec = i2c_new_dummy(client->adapter, 0x34);
+       /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+       cec_addr = 0x34 + (client->addr & 0x03);
+       priv->cec = i2c_new_dummy(client->adapter, cec_addr);
        if (!priv->cec)
                return -ENODEV;

        priv->dpms = DRM_MODE_DPMS_OFF;

+       mutex_init(&priv->mutex);       /* protect the page access */
+
        /* wake up the device: */
        cec_write(priv, REG_CEC_ENAMODS,
                        CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        if (client->irq) {
                int irqf_trigger;

-               /* init read EDID waitqueue */
+               /* init read EDID waitqueue and HDP work */
                init_waitqueue_head(&priv->wq_edid);
+               INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);

                /* clear pending interrupts */
                reg_read(priv, REG_INT_FLAGS_0);
...
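Two of the tda998x changes above are worth spelling out. The new priv->mutex closes a banked-register race: every access is a two-step "select page, then transfer" sequence (set_page() writes the chip's page-select register), and without the lock the steps from different threads can interleave. A sketch of the failure mode the mutex prevents:

        /* thread A: set_page(priv, reg on page 0x00)  -> page select = 0x00
         * thread B: set_page(priv, reg on page 0x02)  -> page select = 0x02
         * thread A: i2c_master_send(...)              -> lands on page 0x02
         *
         * Holding priv->mutex across select + transfer makes the pair atomic.
         */

The CEC address fix mirrors the chip's strapping: the same configuration pins that select the HDMI core's I2C address also offset the CEC core from its 0x34 base, so cec_addr = 0x34 + (client->addr & 0x03) gives, for example, 0x35 for a transmitter strapped to 0x71, where the old code always used 0x34.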
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                if (flags & R600_PTE_SYSTEM) {
                        value = radeon_vm_map_gart(rdev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
                } else if (flags & R600_PTE_VALID) {
                        value = addr;
                } else {
...
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                if (flags & R600_PTE_SYSTEM) {
                        value = radeon_vm_map_gart(rdev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
                } else if (flags & R600_PTE_VALID) {
                        value = addr;
                } else {
...
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
        rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
        WREG32(RADEON_AIC_HI_ADDR, 0);
 }

+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+       return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags)
+                           uint64_t entry)
 {
        u32 *gtt = rdev->gart.ptr;

-       gtt[i] = cpu_to_le32(lower_32_bits(addr));
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }

 void r100_pci_gart_fini(struct radeon_device *rdev)
...
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)

-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                             uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = rdev->gart.ptr;
-
        addr = (lower_32_bits(addr) >> 8) |
                ((upper_32_bits(addr) & 0xff) << 24);
        if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R300_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                addr |= R300_PTE_UNSNOOPED;
+       return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+                             uint64_t entry)
+{
+       void __iomem *ptr = rdev->gart.ptr;
+
        /* on x86 we want this to be CPU endian, on powerpc
         * on powerpc without HW swappers, it'll get swapped on way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-       writel(addr, ((void __iomem *)ptr) + (i * 4));
+       writel(entry, ((void __iomem *)ptr) + (i * 4));
 }

 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
        rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
 }
...
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+       uint64_t entry;
        struct page *page;
        dma_addr_t addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
        unsigned num_cpu_pages;
        unsigned table_size;
        struct page **pages;
-       dma_addr_t *pages_addr;
+       uint64_t *pages_entry;
        bool ready;
 };
@@ -1847,8 +1848,9 @@ struct radeon_asic {
        /* gart */
        struct {
                void (*tlb_flush)(struct radeon_device *rdev);
+               uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
                void (*set_page)(struct radeon_device *rdev, unsigned i,
-                                uint64_t addr, uint32_t flags);
+                                uint64_t entry);
        } gart;
        struct {
                int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
...
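These radeon.h hunks are the core of the regression fix: the old combined hook both encoded and wrote a PTE, so entries could not be cached or replayed. A sketch of how the split hooks cooperate, using the calls exactly as they appear in the radeon_gart.c hunks later in this diff:

        /* encode once, cache, then write */
        page_entry = radeon_gart_get_page_entry(page_base, flags);
        rdev->gart.pages_entry[t] = page_entry;
        if (rdev->gart.ptr)
                radeon_gart_set_page(rdev, t, page_entry);

Since pages_entry[] keeps the fully encoded entries, the whole table can later be rebuilt with set_page() alone, without re-deriving flags, which is what makes the restore-after-pin fix below possible.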
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
                DRM_INFO("Forcing AGP to PCIE mode\n");
                rdev->flags |= RADEON_IS_PCIE;
                rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
                rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        } else {
                DRM_INFO("Forcing AGP to PCI mode\n");
                rdev->flags |= RADEON_IS_PCI;
                rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
                rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
        .mc_wait_for_idle = &rs400_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
        .mc_wait_for_idle = &rs600_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs600_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
        .mc_wait_for_idle = &rs690_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
        .mc_wait_for_idle = &rv515_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
        .mc_wait_for_idle = &r520_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
        .get_gpu_clock_counter = &si_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &si_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
...
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags);
+                           uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
                                struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                                    uint64_t addr, uint32_t flags);
+                                    uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
...
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
+       rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+                                                           RADEON_GART_PAGE_DUMMY);
        return 0;
 }
...
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
                radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.table_addr = gpu_addr;
+
+       if (!r) {
+               int i;
+
+               /* We might have dropped some GART table updates while it wasn't
+                * mapped, restore all entries
+                */
+               for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+                       radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
+
        return r;
 }
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        unsigned t;
        unsigned p;
        int i, j;
-       u64 page_base;

        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
                        rdev->gart.pages[p] = NULL;
-                       rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-                       page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                               rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
                                if (rdev->gart.ptr) {
-                                       radeon_gart_set_page(rdev, t, page_base,
-                                                            RADEON_GART_PAGE_DUMMY);
+                                       radeon_gart_set_page(rdev, t,
+                                                            rdev->dummy_page.entry);
                                }
-                               page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 {
        unsigned t;
        unsigned p;
-       uint64_t page_base;
+       uint64_t page_base, page_entry;
        int i, j;

        if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

        for (i = 0; i < pages; i++, p++) {
-               rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
-               if (rdev->gart.ptr) {
-                       page_base = rdev->gart.pages_addr[p];
-                       for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-                               radeon_gart_set_page(rdev, t, page_base, flags);
-                               page_base += RADEON_GPU_PAGE_SIZE;
+               page_base = dma_addr[i];
+               for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                       page_entry = radeon_gart_get_page_entry(page_base, flags);
+                       rdev->gart.pages_entry[t] = page_entry;
+                       if (rdev->gart.ptr) {
+                               radeon_gart_set_page(rdev, t, page_entry);
                        }
+                       page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
        mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-                                       rdev->gart.num_cpu_pages);
-       if (rdev->gart.pages_addr == NULL) {
+       rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+                                        rdev->gart.num_gpu_pages);
+       if (rdev->gart.pages_entry == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        /* set GART entry to point to the dummy page by default */
-       for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-               rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-       }
+       for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+               rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
        return 0;
 }
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-       if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+       if (rdev->gart.ready) {
                /* unbind pages */
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
        vfree(rdev->gart.pages);
-       vfree(rdev->gart.pages_addr);
+       vfree(rdev->gart.pages_entry);
        rdev->gart.pages = NULL;
-       rdev->gart.pages_addr = NULL;
+       rdev->gart.pages_entry = NULL;

        radeon_dummy_page_fini(rdev);
 }
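The replay loop added to radeon_gart_table_vram_pin() above is what actually cures the regression named in the merge message: while the VRAM-backed table is unpinned, rdev->gart.ptr is NULL, so bind and unbind requests only land in the pages_entry[] shadow. Re-pinning therefore has to write every cached entry back into the freshly placed table and flush the TLB before the GART can be trusted again.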
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
-       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
        uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

        lock_srbm(kgd, mec, pipe, 0, 0);
...
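The kgd_init_pipeline() fix above is a one-character off-by-one. CIK parts have four pipes per MEC (CIK_PIPE_PER_MEC), so with the old pre-increment, pipe_id 0 programmed MEC 1 pipe 1 and pipe_id 3 programmed MEC 2 pipe 0: every HPD buffer landed one pipe away from where the scheduler expected it. Dropping the increment makes pipe_id 0 program MEC 1 pipe 0 as intended, and it pairs with the init_pipelines() change earlier in this diff, which now passes the offset index inx to the hook instead of the raw loop counter i.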
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
        uint64_t result;

        /* page table offset */
-       result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-       /* in case cpu page size != gpu page size*/
-       result |= addr & (~PAGE_MASK);
+       result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+       result &= ~RADEON_GPU_PAGE_MASK;

        return result;
 }
...
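This rewrite is also why the 0xFFFFFFFFFFFFF000ULL masks disappear from the cik_sdma and cayman_dma hunks above and the si_dma hunk below: pages_entry[] holds fully encoded PTEs, and radeon_vm_map_gart() now strips the low flag bits itself (the ~RADEON_GPU_PAGE_MASK step) before handing the address back, so the callers no longer need to. Indexing by RADEON_GPU_PAGE_SHIFT instead of PAGE_SHIFT also retires the old CPU-vs-GPU page size fixup, since the shadow table is kept at GPU page (4 KB) granularity regardless of the CPU page size.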
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)

-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
        uint32_t entry;
-       u32 *gtt = rdev->gart.ptr;

        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
                entry |= RS400_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                entry |= RS400_PTE_UNSNOOPED;
-       entry = cpu_to_le32(entry);
-       gtt[i] = entry;
+       return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       u32 *gtt = rdev->gart.ptr;
+
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }

 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
...
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
        radeon_gart_table_vram_free(rdev);
 }

-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = (void *)rdev->gart.ptr;
-
        addr = addr & 0xFFFFFFFFFFFFF000ULL;
        addr |= R600_PTE_SYSTEM;
        if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R600_PTE_WRITEABLE;
        if (flags & RADEON_GART_PAGE_SNOOP)
                addr |= R600_PTE_SNOOPED;
-       writeq(addr, ptr + (i * 8));
+       return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       void __iomem *ptr = (void *)rdev->gart.ptr;
+
+       writeq(entry, ptr + (i * 8));
 }

 int rs600_irq_set(struct radeon_device *rdev)
...
...@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, ...@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) { for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) { if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr); value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) { } else if (flags & R600_PTE_VALID) {
value = addr; value = addr;
} else { } else {
......
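The dropped masking follows from the caching: radeon_vm_map_gart() now returns an entry that the GART get_page_entry hook already put into the form the hardware expects, so the caller no longer needs to strip and rebuild the low bits. In sketch form:

/* Before: strip the table entry's low bits, flags re-applied later. */
value = radeon_vm_map_gart(rdev, addr) & 0xFFFFFFFFFFFFF000ULL;

/* After: the cached entry is used as-is. */
value = radeon_vm_map_gart(rdev, addr);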
...@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, ...@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
--dev_priv->num_3d_resources; --dev_priv->num_3d_resources;
} else if (unhide_svga) { } else if (unhide_svga) {
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) & vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE); ~SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
} }
mutex_unlock(&dev_priv->release_mutex); mutex_unlock(&dev_priv->release_mutex);
...@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, ...@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex); mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0)) if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv); vmw_release_device(dev_priv);
else if (hide_svga) { else if (hide_svga)
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) | vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE); SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
}
n3d = (int32_t) dev_priv->num_3d_resources; n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex); mutex_unlock(&dev_priv->release_mutex);
...@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev; dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset; dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100; dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex); mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock); rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem); ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) { for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]); idr_init(&dev_priv->res_idr[i]);
...@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev; dev_priv->enable_fb = enable_fbdev;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID); svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) { if (svga_id != SVGA_ID_2) {
ret = -ENOSYS; ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
mutex_unlock(&dev_priv->hw_mutex);
goto out_err0; goto out_err0;
} }
...@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size; dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv); ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0)) { if (unlikely(ret != 0))
mutex_unlock(&dev_priv->hw_mutex);
goto out_err0; goto out_err0;
}
/* /*
* Limit back buffer size to VRAM size. Remove this once * Limit back buffer size to VRAM size. Remove this once
...@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size) if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size; dev_priv->prim_bb_mem = dev_priv->vram_size;
mutex_unlock(&dev_priv->hw_mutex);
vmw_print_capabilities(dev_priv->capabilities); vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) { if (dev_priv->capabilities & SVGA_CAP_GMR2) {
...@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, ...@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_kms_save_vga(dev_priv); vmw_kms_save_vga(dev_priv);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0); vmw_write(dev_priv, SVGA_REG_TRACES, 0);
mutex_unlock(&dev_priv->hw_mutex);
} }
if (active) { if (active) {
...@@ -1196,9 +1184,7 @@ static int vmw_master_set(struct drm_device *dev, ...@@ -1196,9 +1184,7 @@ static int vmw_master_set(struct drm_device *dev,
if (!dev_priv->enable_fb) { if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv); vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true); vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1); vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
} }
return ret; return ret;
} }
...@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, ...@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n"); DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv); vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true); vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1); vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
} }
dev_priv->active_master = &dev_priv->fbdev_master; dev_priv->active_master = &dev_priv->fbdev_master;
...@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) ...@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev); struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID); (void) vmw_read(dev_priv, SVGA_REG_ID);
mutex_unlock(&dev_priv->hw_mutex);
/** /**
* Reclaim 3d reference held by fbdev and potentially * Reclaim 3d reference held by fbdev and potentially
......
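The vmwgfx changes above all follow from one move: register access is now serialized inside vmw_read()/vmw_write() themselves (see the accessors in the next file), so the hw_mutex lock/unlock pairs scattered around every call site simply disappear. Before and after, using a line from the hunks above:

/* Before: every caller had to bracket the access itself. */
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
mutex_unlock(&dev_priv->hw_mutex);

/* After: the accessor takes hw_lock internally. */
vmw_write(dev_priv, SVGA_REG_TRACES, 0);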
...@@ -399,7 +399,8 @@ struct vmw_private { ...@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size; uint32_t memory_size;
bool has_gmr; bool has_gmr;
bool has_mob; bool has_mob;
struct mutex hw_mutex; spinlock_t hw_lock;
spinlock_t cap_lock;
/* /*
* VGA registers. * VGA registers.
...@@ -449,8 +450,9 @@ struct vmw_private { ...@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq; atomic_t marker_seq;
wait_queue_head_t fence_queue; wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue; wait_queue_head_t fifo_queue;
int fence_queue_waiters; /* Protected by hw_mutex */ spinlock_t waiter_lock;
int goal_queue_waiters; /* Protected by hw_mutex */ int fence_queue_waiters; /* Protected by waiter_lock */
int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters; atomic_t fifo_queue_waiters;
uint32_t last_read_seqno; uint32_t last_read_seqno;
spinlock_t irq_lock; spinlock_t irq_lock;
...@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) ...@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv; return (struct vmw_master *) master->driver_priv;
} }
/*
* The locking here is fine-grained, so that it is performed once
* for every read and write operation. This is of course costly, but we
* don't perform much register access in the timing-critical paths anyway.
* Instead we have the extra benefit of being sure that we don't forget
* the hw lock around register accesses.
*/
static inline void vmw_write(struct vmw_private *dev_priv, static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value) unsigned int offset, uint32_t value)
{ {
unsigned long irq_flags;
spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
} }
static inline uint32_t vmw_read(struct vmw_private *dev_priv, static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset) unsigned int offset)
{ {
uint32_t val; unsigned long irq_flags;
u32 val;
spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
return val; return val;
} }
......
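Note the scope of hw_lock: it makes each index/value port pair atomic but deliberately does not span sequences of accesses. Where two accesses must stay paired - select a capability index, then read its value - the caller serializes with its own lock, which is what the new cap_lock is for in the fifo and ioctl hunks below. The pattern:

u32 result;

spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); /* select cap */
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);           /* read value */
spin_unlock(&dev_priv->cap_lock);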
...@@ -35,7 +35,7 @@ struct vmw_fence_manager { ...@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
spinlock_t lock; spinlock_t lock;
struct list_head fence_list; struct list_head fence_list;
struct work_struct work, ping_work; struct work_struct work;
u32 user_fence_size; u32 user_fence_size;
u32 fence_size; u32 fence_size;
u32 event_fence_action_size; u32 event_fence_action_size;
...@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) ...@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga"; return "svga";
} }
static void vmw_fence_ping_func(struct work_struct *work)
{
struct vmw_fence_manager *fman =
container_of(work, struct vmw_fence_manager, ping_work);
vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
}
static bool vmw_fence_enable_signaling(struct fence *f) static bool vmw_fence_enable_signaling(struct fence *f)
{ {
struct vmw_fence_obj *fence = struct vmw_fence_obj *fence =
...@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) ...@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false; return false;
if (mutex_trylock(&dev_priv->hw_mutex)) { vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&dev_priv->hw_mutex);
} else
schedule_work(&fman->ping_work);
return true; return true;
} }
...@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) ...@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list); INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list); INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func); INIT_WORK(&fman->work, &vmw_fence_work_func);
INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true; fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
...@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) ...@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty; bool lists_empty;
(void) cancel_work_sync(&fman->work); (void) cancel_work_sync(&fman->work);
(void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags); spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) && lists_empty = list_empty(&fman->fence_list) &&
......
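The ping_work machinery existed only because the old hw mutex could sleep: enable_signaling runs in atomic context, so the driver had to trylock and, on failure, defer the ping to a workqueue. With register access under a spinlock the direct call is always safe, and the whole fallback goes away. The removed pattern, annotated for reference:

if (mutex_trylock(&dev_priv->hw_mutex)) {
        /* Fast path: got the sleeping lock without having to sleep. */
        vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
        mutex_unlock(&dev_priv->hw_mutex);
} else {
        /* Atomic context, must not sleep: punt to process context. */
        schedule_work(&fman->ping_work);
}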
...@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) ...@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob) if (!dev_priv->has_mob)
return false; return false;
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->cap_lock);
return (result != 0); return (result != 0);
} }
...@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) ...@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
...@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) ...@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb(); mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX); max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN); min = ioread32(fifo_mem + SVGA_FIFO_MIN);
...@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) ...@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy); return vmw_fifo_send_fence(dev_priv, &dummy);
} }
void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{ {
__le32 __iomem *fifo_mem = dev_priv->mmio_virt; __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
static DEFINE_SPINLOCK(ping_lock);
unsigned long irq_flags;
/*
* The ping_lock is needed because we don't have an atomic
* test-and-set of the SVGA_FIFO_BUSY register.
*/
spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason); vmw_write(dev_priv, SVGA_REG_SYNC, reason);
} }
} spin_unlock_irqrestore(&ping_lock, irq_flags);
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
mutex_lock(&dev_priv->hw_mutex);
vmw_fifo_ping_host_locked(dev_priv, reason);
mutex_unlock(&dev_priv->hw_mutex);
} }
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{ {
__le32 __iomem *fifo_mem = dev_priv->mmio_virt; __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
; ;
...@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) ...@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES, vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state); dev_priv->traces_state);
mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue); vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) { if (likely(fifo->static_buffer != NULL)) {
...@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, ...@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return vmw_fifo_wait_noirq(dev_priv, bytes, return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout); interruptible, timeout);
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS, outl(SVGA_IRQFLAG_FIFO_PROGRESS,
...@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, ...@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->waiter_lock);
if (interruptible) if (interruptible)
ret = wait_event_interruptible_timeout ret = wait_event_interruptible_timeout
...@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, ...@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
else if (likely(ret > 0)) else if (likely(ret > 0))
ret = 0; ret = 0;
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->waiter_lock);
return ret; return ret;
} }
......
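The waiter counters that used to piggyback on the hw mutex now get a dedicated waiter_lock, with the existing irq_lock nesting inside it whenever the IRQ mask has to change. Sketched from the fifo-wait hunk above, with the mask update abridged:

spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
        unsigned long irq_flags;

        /* irq_lock nests inside waiter_lock for the mask update. */
        spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
        /* ...ack FIFO_PROGRESS, set it in irq_mask, write IRQMASK... */
        spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
spin_unlock(&dev_priv->waiter_lock);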
...@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, ...@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->cap_lock);
for (i = 0; i < max_size; ++i) { for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i; compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->cap_lock);
return 0; return 0;
} }
...@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, ...@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (num > SVGA3D_DEVCAP_MAX) if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX; num = SVGA3D_DEVCAP_MAX;
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) { for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->cap_lock);
} else if (gb_objects) { } else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size); ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
......
...@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) ...@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{ {
uint32_t busy;
mutex_lock(&dev_priv->hw_mutex); return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
busy = vmw_read(dev_priv, SVGA_REG_BUSY);
mutex_unlock(&dev_priv->hw_mutex);
return (busy == 0);
} }
void vmw_update_seqno(struct vmw_private *dev_priv, void vmw_update_seqno(struct vmw_private *dev_priv,
...@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, ...@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
void vmw_seqno_waiter_add(struct vmw_private *dev_priv) void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{ {
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->waiter_lock);
if (dev_priv->fence_queue_waiters++ == 0) { if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags; unsigned long irq_flags;
...@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) ...@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->waiter_lock);
} }
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{ {
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->fence_queue_waiters == 0) { if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags; unsigned long irq_flags;
...@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) ...@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->waiter_lock);
} }
void vmw_goal_waiter_add(struct vmw_private *dev_priv) void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{ {
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->waiter_lock);
if (dev_priv->goal_queue_waiters++ == 0) { if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags; unsigned long irq_flags;
...@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) ...@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->waiter_lock);
} }
void vmw_goal_waiter_remove(struct vmw_private *dev_priv) void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{ {
mutex_lock(&dev_priv->hw_mutex); spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->goal_queue_waiters == 0) { if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags; unsigned long irq_flags;
...@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) ...@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
} }
mutex_unlock(&dev_priv->hw_mutex); spin_unlock(&dev_priv->waiter_lock);
} }
int vmw_wait_seqno(struct vmw_private *dev_priv, int vmw_wait_seqno(struct vmw_private *dev_priv,
...@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) ...@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return; return;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
......
...@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) ...@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector); struct vmw_display_unit *du = vmw_connector_to_du(connector);
mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
mutex_unlock(&dev_priv->hw_mutex);
return ((vmw_connector_to_du(connector)->unit < num_displays && return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ? du->pref_active) ?
......