Commit 6df944c5 authored by Linus Torvalds

Merge branch 'drm-gem-update' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-gem-update' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Return error in i915_gem_set_to_gtt_domain if we're not in the GTT.
  drm/i915: Retry execbuffer pinning after clearing the GTT
  drm/i915: Move the execbuffer domain computations together
  drm/i915: Rename object_set_domain to object_set_to_gpu_domain
  drm/i915: Make a single set-to-cpu-domain path and use it wherever needed.
  drm/i915: Make a single set-to-gtt-domain path.
  drm/i915: If interrupted while setting object domains, still emit the flush.
  drm/i915: Move flushing list cleanup from flush request retire to request emit.
  drm/i915: Respect GM965/GM45 bit-17-instead-of-bit-11 option for swizzling.
parents 341e5580 02354392
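
The common thread in this series is that the old catch-all i915_gem_object_set_domain()/i915_gem_set_domain() entry points are replaced by single-purpose paths: i915_gem_object_set_to_gtt_domain() and i915_gem_object_set_to_cpu_domain() for CPU-visible access, i915_gem_object_set_to_gpu_domain() for execbuffer, plus per-cache flush helpers. A minimal caller-side sketch of the new pattern, assuming only the signatures visible in the diff below (the wrapper function itself is hypothetical and assumes dev->struct_mutex is held, as in the real callers):

/* Illustrative only: prepare an object for a CPU write through the GTT
 * aperture, mirroring what i915_gem_gtt_pwrite() and the relocation path
 * now do.  Not a drop-in addition to the driver.
 */
static int example_prepare_gtt_write(struct drm_gem_object *obj)
{
	int ret;

	/* Flushes any pending GPU/CPU writes, waits for rendering, and
	 * marks the object as read (and written) through the GTT.
	 */
	ret = i915_gem_object_set_to_gtt_domain(obj, 1 /* write */);
	if (ret)
		return ret;	/* e.g. -EINVAL if the object is not GTT-bound */

	/* ... copy the user data through the GTT mapping here ... */

	return 0;
}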
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
 	 * still have a write_domain which needs to be flushed before
 	 * unbinding.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
 	 * freed, and we'll pull it off the list in the free path.
@@ -371,8 +379,8 @@ struct drm_i915_gem_object {
 	uint32_t agp_type;
 
 	/**
-	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
+	 * flags which individual pages are valid.
 	 */
 	uint8_t *page_cpu_valid;
 };
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
-
 	struct list_head list;
 };
...
@@ -33,21 +33,21 @@
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+						      uint64_t offset,
+						      uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-					       I915_GEM_DOMAIN_CPU, 0);
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
 		goto fail;
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
 	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-		 obj, obj->size, args->read_domains, args->write_domain);
+		 obj, obj->size, read_domains, write_domain);
 #endif
-	ret = i915_gem_set_domain(obj, file_priv,
-				  args->read_domains, args->write_domain);
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-	}
+	if (obj_priv->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+	}
+
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
@@ -676,31 +731,11 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
 
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
-			i915_gem_object_move_to_inactive(obj);
-		}
-	}
-
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-	}
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
+			i915_gem_object_move_to_inactive(obj);
+	}
 }
 
 /**
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret;
 
-	/* If there are writes queued to the buffer, flush and
-	 * create a new seqno to wait for.
+	/* This function only exists to support waiting for existing rendering,
+	 * not for emitting required flushes.
 	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
-		DRM_INFO("%s: flushing object %p from write domain %08x\n",
-			 __func__, obj, write_domain);
-#endif
-		i915_gem_flush(dev, 0, write_domain);
-
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
-		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
-	}
+	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
@@ -950,23 +970,15 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* Wait for any rendering to complete
-	 */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret) {
-		DRM_ERROR("wait_rendering failed: %d\n", ret);
-		return ret;
-	}
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
 	 * also ensure that all pending GPU writes are finished
 	 * before we unbind.
 	 */
-	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-					 I915_GEM_DOMAIN_CPU);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret) {
+		if (ret != -ERESTARTSYS)
 			DRM_ERROR("set_domain failed: %d\n", ret);
 		return ret;
 	}
@@ -1082,6 +1094,19 @@ i915_gem_evict_something(struct drm_device *dev)
 	return ret;
 }
 
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	int ret;
+
+	for (;;) {
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0)
+			break;
+	}
+	return ret;
+}
+
 static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
@@ -1168,6 +1193,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		ret = i915_gem_evict_something(dev);
 		if (ret != 0) {
+			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to evict a buffer %d\n", ret);
 			return ret;
 		}
@@ -1228,6 +1254,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	uint32_t seqno;
+
+	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+		return;
+
+	/* Queue the GPU write cache flushing we need. */
+	i915_gem_flush(dev, 0, obj->write_domain);
+	seqno = i915_add_request(dev, obj->write_domain);
+	obj->write_domain = 0;
+	i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 */
+	obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	i915_gem_clflush_object(obj);
+	drm_agp_chipset_flush(dev);
+	obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	/* If we're writing through the GTT domain, then CPU and GPU caches
+	 * will need to be invalidated at next use.
+	 */
+	if (write)
+		obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
+		obj_priv->dirty = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret != 0)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we have a partially-valid cache of the object in the CPU,
+	 * finish invalidating it and free the per-page flags.
+	 */
+	i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+
+		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
  * may not actually perform the necessary flushing/invaliding though,
@@ -1339,8 +1502,8 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
  *		MI_FLUSH
  *		drm_agp_chipset_flush
  */
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-			    uint32_t read_domains,
-			    uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  uint32_t read_domains,
+				  uint32_t write_domain)
 {
@@ -1348,7 +1511,9 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
-	int ret;
+
+	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1550,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
 			 __func__, flush_domains, invalidate_domains);
 #endif
-		/*
-		 * If we're invaliding the CPU cache and flushing a GPU cache,
-		 * then pause for rendering so that the GPU caches will be
-		 * flushed before the cpu cache is invalidated
-		 */
-		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
-				       I915_GEM_DOMAIN_GTT))) {
-			ret = i915_gem_object_wait_rendering(obj);
-			if (ret)
-				return ret;
-		}
 		i915_gem_clflush_object(obj);
 	}
 
 	if ((write_domain | flush_domains) != 0)
 		obj->write_domain = write_domain;
-
-	/* If we're invalidating the CPU domain, clear the per-page CPU
-	 * domain list as well.
-	 */
-	if (obj_priv->page_cpu_valid != NULL &&
-	    (write_domain != 0 ||
-	     read_domains & I915_GEM_DOMAIN_CPU)) {
-		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-			 DRM_MEM_DRIVER);
-		obj_priv->page_cpu_valid = NULL;
-	}
 	obj->read_domains = read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1565,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 		 obj->read_domains, obj->write_domain,
 		 dev->invalidate_domains, dev->flush_domains);
 #endif
-	return 0;
 }
 
 /**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
  *
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size,
-				 uint32_t read_domains,
-				 uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int ret, i;
 
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
-		return 0;
+	if (!obj_priv->page_cpu_valid)
+		return;
 
-	if (read_domains != I915_GEM_DOMAIN_CPU ||
-	    write_domain != 0)
-		return i915_gem_object_set_domain(obj,
-						  read_domains, write_domain);
-
-	/* Wait on any GPU rendering to the object to be flushed. */
+	/* If we're partially in the CPU read domain, finish moving it in.
+	 */
+	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+		int i;
+
+		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+			if (obj_priv->page_cpu_valid[i])
+				continue;
+			drm_clflush_pages(obj_priv->page_list + i, 1);
+		}
+		drm_agp_chipset_flush(dev);
+	}
+
+	/* Free the page_cpu_valid mappings which are now stale, whether
+	 * or not we've got I915_GEM_DOMAIN_CPU.
+	 */
+	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+		 DRM_MEM_DRIVER);
+	obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid.  The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+					  uint64_t offset, uint64_t size)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+
+	if (offset == 0 && size == obj->size)
+		return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
+	if (ret != 0)
 		return ret;
+	i915_gem_object_flush_gtt_write_domain(obj);
 
+	/* If we're already fully in the CPU read domain, we're done. */
+	if (obj_priv->page_cpu_valid == NULL &&
+	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+		return 0;
+
+	/* Otherwise, create/clear the per-page CPU read domain flag if we're
+	 * newly adding I915_GEM_DOMAIN_CPU
+	 */
 	if (obj_priv->page_cpu_valid == NULL) {
 		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
 						      DRM_MEM_DRIVER);
-	}
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+	     i++) {
 		if (obj_priv->page_cpu_valid[i])
 			continue;
@@ -1472,39 +1661,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
 		obj_priv->page_cpu_valid[i] = 1;
 	}
 
-	return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
-	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	return flush_domains;
+	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+	return 0;
 }
 
 /**
@@ -1585,6 +1749,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			return -EINVAL;
 		}
 
+		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+			DRM_ERROR("reloc with read/write CPU domains: "
+				  "obj %p target %d offset %d "
+				  "read %08x write %08x",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.read_domains,
+				  reloc.write_domain);
+			return -EINVAL;
+		}
+
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
 			DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1801,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			continue;
 		}
 
-		/* Now that we're going to actually write some data in,
-		 * make sure that any rendering using this buffer's contents
-		 * is completed.
-		 */
-		i915_gem_object_wait_rendering(obj);
-
-		/* As we're writing through the gtt, flush
-		 * any CPU writes before we write the relocations
-		 */
-		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			drm_agp_chipset_flush(dev);
-			obj->write_domain = 0;
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
 		}
 
 		/* Map the page containing the relocation we're going to
@@ -1779,6 +1947,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains;
+	int pin_tries;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1996,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
-	/* Look up object handles and perform the relocations */
+	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -1844,17 +2006,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			ret = -EBADF;
 			goto err;
 		}
+	}
+
+	/* Pin and relocate */
+	for (pin_tries = 0; ; pin_tries++) {
+		ret = 0;
+		for (i = 0; i < args->buffer_count; i++) {
 			object_list[i]->pending_read_domains = 0;
 			object_list[i]->pending_write_domain = 0;
 			ret = i915_gem_object_pin_and_relocate(object_list[i],
 							       file_priv,
 							       &exec_list[i]);
-		if (ret) {
-			DRM_ERROR("object bind and relocate failed %d\n", ret);
+			if (ret)
+				break;
+			pinned = i + 1;
+		}
+		/* success */
+		if (ret == 0)
+			break;
+
+		/* error other than GTT full, or we've already tried again */
+		if (ret != -ENOMEM || pin_tries >= 1) {
+			DRM_ERROR("Failed to pin buffers %d\n", ret);
 			goto err;
 		}
-		pinned = i + 1;
+
+		/* unpin all of our buffers */
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		/* evict everyone we can from the aperture */
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			goto err;
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2048,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
-		/* make sure all previous memory operations have passed */
-		ret = i915_gem_object_set_domain(obj,
-						 obj->pending_read_domains,
-						 obj->pending_write_domain);
-		if (ret)
-			goto err;
+		/* Compute new gpu domains and update invalidate/flush */
+		i915_gem_object_set_to_gpu_domain(obj,
+						  obj->pending_read_domains,
+						  obj->pending_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -1898,8 +2098,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			      ~0);
 #endif
 
-	(void)i915_add_request(dev, flush_domains);
-
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {
@@ -1927,10 +2125,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -2061,11 +2257,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	/* XXX - flush the CPU caches for pinned objects
 	 * as the X server doesn't manage domains yet
 	 */
-	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
-		obj->write_domain = 0;
-	}
+	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = obj_priv->gtt_offset;
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2359,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-		    struct drm_file *file_priv,
-		    uint32_t read_domains,
-		    uint32_t write_domain)
-{
-	struct drm_device *dev = obj->dev;
-	int ret;
-	uint32_t flush_domains;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-	if (ret)
-		return ret;
-	flush_domains = i915_gem_dev_set_domain(obj->dev);
-
-	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
-		(void) i915_add_request(dev, flush_domains);
-
-	return 0;
-}
-
 /** Unbinds all objects that are on the given buffer list. */
 static int
 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
...
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
...
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		    dcc & DCC_CHANNEL_XOR_DISABLE) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		} else if (IS_I965GM(dev) || IS_GM45(dev)) {
-			/* GM965 only does bit 11-based channel
-			 * randomization
+		} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+			   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+			/* GM965/GM45 does either bit 11 or bit 17
+			 * swizzling.
 			 */
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
...
@@ -522,6 +522,7 @@
 #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
 #define DCC_ADDRESSING_MODE_MASK			(3 << 0)
 #define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
 
 /** 965 MCH register controlling DRAM channel configuration */
 #define C0DRB3			0x10206
...