Commit de151cf6 authored by Jesse Barnes, committed by Dave Airlie

drm/i915: add GEM GTT mapping support

Use the new core GEM object mapping code to allow GTT mapping of GEM
objects on i915.  The fault handler will make sure a fence register is
allocated too, if the object in question is tiled.
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent a2c0a97b
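
For context, userspace consumes the new interface by asking the kernel for the fake offset and then calling mmap(2) on the DRM fd at that offset. A minimal sketch, assuming an open DRM fd and a valid GEM handle; the map_gtt helper is illustrative, not part of this patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "i915_drm.h"

/* Illustrative helper: map a GEM object through the GTT aperture. */
static void *map_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* The kernel fills in the fake mmap offset created by
	 * i915_gem_create_mmap_offset(). */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
		return MAP_FAILED;

	/* Faults on this mapping land in i915_gem_fault(), which binds
	 * the object and programs a fence register if it is tiled. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)arg.offset);
}
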
@@ -991,6 +991,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
...
@@ -81,6 +81,10 @@ static int i915_resume(struct drm_device *dev)
return 0;
}
static struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
};
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
@@ -113,13 +117,14 @@ static struct drm_driver driver = {
.proc_cleanup = i915_gem_proc_cleanup,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
...
@@ -107,6 +107,11 @@ struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
};
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -149,6 +154,10 @@ typedef struct drm_i915_private {
struct intel_opregion opregion;
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
@@ -367,6 +376,21 @@ struct drm_i915_gem_object {
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
/**
* Required alignment for the object
*/
uint32_t gtt_alignment;
/**
* Fake offset for use by mmap(2)
*/
uint64_t mmap_offset;
/**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
*/
int fence_reg;
/** Boolean whether this object has a valid gtt offset. */
int gtt_bound;
@@ -379,6 +403,7 @@ struct drm_i915_gem_object {
/** Current tiling mode for the object. */
uint32_t tiling_mode;
uint32_t stride;
/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
uint32_t agp_type;
@@ -493,6 +518,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -529,6 +556,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -584,6 +612,13 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
#define I915_READ8(reg) readb(dev_priv->regs + (reg))
#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
#ifdef writeq
#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
#else
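/* No writeq on this platform: emulate the 64-bit write as two 32-bit
 * writes, low dword at (reg), high dword at (reg) + 4. */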
#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \
writel(upper_32_bits(val), dev_priv->regs + \
(reg) + 4))
#endif
#define I915_VERBOSE 0
...
@@ -51,6 +51,11 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -529,6 +534,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
/**
* i915_gem_fault - fault a page into the GTT
* @vma: VMA in question
* @vmf: fault info
*
* The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
* from userspace. The fault handler takes care of binding the object to
* the GTT (if needed), allocating and programming a fence register (again,
* only if needed based on whether the old reg is still valid or the object
* is tiled) and inserting a new PTE into the faulting process.
*
* Note that the faulting process may involve evicting existing objects
* from the GTT and/or fence registers to make room. So performance may
* suffer if the GTT working set is large or there are few fence registers
* left.
*/
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
if (!obj_priv->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return VM_FAULT_SIGBUS;
}
list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
}
/* Need a new fence register? */
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE)
i915_gem_object_get_fence_reg(obj);
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
mutex_unlock(&dev->struct_mutex);
switch (ret) {
case -ENOMEM:
case -EAGAIN:
return VM_FAULT_OOM;
case -EFAULT:
case -EBUSY:
DRM_ERROR("can't insert pfn?? fault or busy...\n");
return VM_FAULT_SIGBUS;
default:
return VM_FAULT_NOPAGE;
}
}
/**
* i915_gem_create_mmap_offset - create a fake mmap offset for an object
* @obj: obj in question
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
*/
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_map_list *list;
struct drm_map *map;
int ret = 0;
/* Set the object up for mmap'ing */
list = &obj->map_list;
list->map = drm_calloc(1, sizeof(struct drm_map_list),
DRM_MEM_DRIVER);
if (!list->map)
return -ENOMEM;
map = list->map;
map->type = _DRM_GEM;
map->size = obj->size;
map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
obj->size / PAGE_SIZE, 0, 0);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
ret = -ENOMEM;
goto out_free_list;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
obj->size / PAGE_SIZE, 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
}
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
DRM_ERROR("failed to add to map hash\n");
goto out_free_mm;
}
/* By now we should be all set; any drm_mmap request on the offset
* below will get to our mmap & fault handler */
obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
return 0;
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
return ret;
}
/**
* i915_gem_get_gtt_alignment - return required GTT alignment for an object
* @obj: object to check
*
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping if needed.
*/
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int start, i;
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
if (IS_I9XX(dev))
start = 1024*1024;
else
start = 512*1024;
for (i = start; i < obj->size; i <<= 1)
;
return i;
}
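
As a worked example (sizes illustrative): a 3 MiB X-tiled object on a 9xx chip starts at the 1 MiB minimum and the loop doubles it to 4 MiB, the smallest power-of-two fence size that covers the object; on 965 the same object needs only 4 KiB page alignment.
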
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file_priv: DRM file info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
* up so we can get faults in the handler above.
*
* The fault handler will take care of binding the object into the GTT
* (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into
* userspace.
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
mutex_lock(&dev->struct_mutex);
obj_priv = obj->driver_private;
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
args->offset = obj_priv->mmap_offset;
obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
/* Make sure the alignment is correct for fence regs etc */
if (obj_priv->agp_mem &&
(obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
/*
* Pull it into the GTT so that we have a page list (makes the
* initial fault faster and any subsequent flushing possible).
*/
if (!obj_priv->agp_mem) {
ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
@@ -726,6 +977,7 @@ i915_gem_retire_request(struct drm_device *dev,
*/
if (obj_priv->last_rendering_seqno != request->seqno)
return;
#if WATCH_LRU
DRM_INFO("%s: retire %d moves to inactive list %p\n",
__func__, request->seqno, obj);
@@ -956,6 +1208,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
loff_t offset;
int ret = 0;
#if WATCH_BUF
@@ -991,6 +1244,13 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
/* blow away mappings if mapped through GTT */
offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
i915_gem_object_free_page_list(obj);
if (obj_priv->gtt_space) {
@@ -1149,6 +1409,203 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int regnum = obj_priv->fence_reg;
uint64_t val;
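/* 965 fence layout: address of the object's last 4k page in the upper
 * dword, start address in the lower dword, pitch in 128-byte units
 * (minus one), plus the tiling and valid bits. */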
val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
0xfffff000) << 32;
val |= obj_priv->gtt_offset & 0xfffff000;
val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int regnum = obj_priv->fence_reg;
uint32_t val;
uint32_t pitch_val;
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) {
WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
return;
}
if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
IS_I945GM(dev) ||
IS_G33(dev)))
pitch_val = (obj_priv->stride / 128) - 1;
else
pitch_val = (obj_priv->stride / 512) - 1;
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(obj->size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int regnum = obj_priv->fence_reg;
uint32_t val;
uint32_t pitch_val;
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) {
WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
return;
}
pitch_val = (obj_priv->stride / 128) - 1;
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(obj->size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
*
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*/
static void
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_i915_fence_reg *reg = NULL;
int i, ret;
switch (obj_priv->tiling_mode) {
case I915_TILING_NONE:
WARN(1, "allocating a fence for non-tiled object?\n");
break;
case I915_TILING_X:
WARN(obj_priv->stride & (512 - 1),
"object is X tiled but has non-512B pitch\n");
break;
case I915_TILING_Y:
WARN(obj_priv->stride & (128 - 1),
"object is Y tiled but has non-128B pitch\n");
break;
}
/* First try to find a free reg */
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
break;
}
/* None available, try to steal one or wait for a user to finish */
if (i == dev_priv->num_fence_regs) {
struct drm_i915_gem_object *old_obj_priv = NULL;
loff_t offset;
try_again:
/* Could try to use LRU here instead... */
for (i = dev_priv->fence_reg_start;
i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
old_obj_priv = reg->obj->driver_private;
if (!old_obj_priv->pin_count)
break;
}
/*
* Now things get ugly... we have to wait for one of the
* objects to finish before trying again.
*/
if (i == dev_priv->num_fence_regs) {
ret = i915_gem_object_wait_rendering(reg->obj);
if (ret) {
WARN(ret, "wait_rendering failed: %d\n", ret);
return;
}
goto try_again;
}
/*
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
*/
offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
unmap_mapping_range(dev->dev_mapping, offset,
reg->obj->size, 1);
old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
obj_priv->fence_reg = i;
reg->obj = obj;
if (IS_I965G(dev))
i965_write_fence_reg(reg);
else if (IS_I9XX(dev))
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
}
/**
* i915_gem_clear_fence_reg - clear out fence register info
* @obj: object to clear
*
* Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj_priv.
*/
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (IS_I965G(dev))
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
else
I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
@@ -2351,12 +2808,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj->driver_private = obj_priv;
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_map *map;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
while (obj_priv->pin_count > 0)
@@ -2364,6 +2827,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_object_unbind(obj);
list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
if (list->file_offset_node) {
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
map = list->map;
if (map) {
drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
list->map = NULL;
}
drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
@@ -2432,8 +2909,7 @@ i915_gem_idle(struct drm_device *dev)
*/
i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
if (seqno == 0) {
mutex_unlock(&dev->struct_mutex);
@@ -2758,5 +3234,13 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
if (IS_I965G(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
i915_gem_detect_bit_6_swizzle(dev);
}
@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
mutex_unlock(&dev->struct_mutex);
...
@@ -175,9 +175,26 @@
#define DISPLAY_PLANE_B (1<<20)
/*
* Fence registers
*/
#define FENCE_REG_830_0 0x2000
#define I830_FENCE_START_MASK 0x07f80000
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8)
#define FENCE_REG_965_0 0x03000
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
#define I965_FENCE_REG_VALID (1<<0)
/*
* Instruction and interrupt control regs
*/
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
@@ -245,6 +262,7 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
/*
* Framebuffer compression (915+ only)
*/
...
@@ -160,6 +160,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -187,6 +188,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -382,6 +384,18 @@ struct drm_i915_gem_mmap {
uint64_t addr_ptr;
};
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t offset;
};
struct drm_i915_gem_set_domain {
/** Handle for the object */
uint32_t handle;
...