Commit fb7d516a authored by Daniel Vetter, committed by Chris Wilson

drm/i915: add accounting for mappable objects in gtt v2

More precisely: For those that _need_ to be mappable. Also add two
BUG_ONs in fault and pin to check the consistency of the mappable
flag.

Changes in v2:
- Add tracking of gtt mappable space (to notice mappable/unmappable
  balancing issues).
- Improve the mappable working set tracking by tracking fault and pin
  separately.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent ec57d260
...@@ -131,6 +131,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -131,6 +131,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (fence: %d)", obj->fence_reg); seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL) if (obj->gtt_space != NULL)
seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
if (obj->pin_mappable || obj->fault_mappable)
seq_printf(m, " (mappable)");
if (obj->ring != NULL) if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name); seq_printf(m, " (%s)", obj->ring->name);
} }
...@@ -207,6 +209,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) ...@@ -207,6 +209,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
......
...@@ -643,9 +643,13 @@ typedef struct drm_i915_private { ...@@ -643,9 +643,13 @@ typedef struct drm_i915_private {
size_t object_memory; size_t object_memory;
size_t pin_memory; size_t pin_memory;
size_t gtt_memory; size_t gtt_memory;
size_t gtt_mappable_memory;
size_t mappable_gtt_used;
size_t mappable_gtt_total;
size_t gtt_total; size_t gtt_total;
u32 object_count; u32 object_count;
u32 pin_count; u32 pin_count;
u32 gtt_mappable_count;
u32 gtt_count; u32 gtt_count;
} mm; } mm;
struct sdvo_device_mapping sdvo_mappings[2]; struct sdvo_device_mapping sdvo_mappings[2];
...@@ -775,6 +779,14 @@ struct drm_i915_gem_object { ...@@ -775,6 +779,14 @@ struct drm_i915_gem_object {
unsigned int pin_count : 4; unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/**
* Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separate for a more
* accurate mappable working set.
*/
unsigned int fault_mappable : 1;
unsigned int pin_mappable : 1;
/** AGP memory structure for our GTT binding. */ /** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem; DRM_AGP_MEM *agp_mem;
......
...@@ -84,31 +84,83 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, ...@@ -84,31 +84,83 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
} }
static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
size_t size) struct drm_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
dev_priv->mm.gtt_count++; dev_priv->mm.gtt_count++;
dev_priv->mm.gtt_memory += size; dev_priv->mm.gtt_memory += obj->size;
if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) {
dev_priv->mm.mappable_gtt_used +=
min_t(size_t, obj->size,
dev_priv->mm.gtt_mappable_end
- obj_priv->gtt_offset);
}
} }
static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
size_t size) struct drm_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
dev_priv->mm.gtt_count--; dev_priv->mm.gtt_count--;
dev_priv->mm.gtt_memory -= size; dev_priv->mm.gtt_memory -= obj->size;
if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) {
dev_priv->mm.mappable_gtt_used -=
min_t(size_t, obj->size,
dev_priv->mm.gtt_mappable_end
- obj_priv->gtt_offset);
}
}
/**
* Update the mappable working set counters. Call _only_ when there is a change
* in one of (pin|fault)_mappable and update *_mappable _before_ calling.
* @mappable: new state the changed mappable flag (either pin_ or fault_).
*/
static void
i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
struct drm_gem_object *obj,
bool mappable)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (mappable) {
if (obj_priv->pin_mappable && obj_priv->fault_mappable)
/* Combined state was already mappable. */
return;
dev_priv->mm.gtt_mappable_count++;
dev_priv->mm.gtt_mappable_memory += obj->size;
} else {
if (obj_priv->pin_mappable || obj_priv->fault_mappable)
/* Combined state still mappable. */
return;
dev_priv->mm.gtt_mappable_count--;
dev_priv->mm.gtt_mappable_memory -= obj->size;
}
} }
static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
size_t size) struct drm_gem_object *obj,
bool mappable)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
dev_priv->mm.pin_count++; dev_priv->mm.pin_count++;
dev_priv->mm.pin_memory += size; dev_priv->mm.pin_memory += obj->size;
if (mappable) {
obj_priv->pin_mappable = true;
i915_gem_info_update_mappable(dev_priv, obj, true);
}
} }
static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
size_t size) struct drm_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
dev_priv->mm.pin_count--; dev_priv->mm.pin_count--;
dev_priv->mm.pin_memory -= size; dev_priv->mm.pin_memory -= obj->size;
if (obj_priv->pin_mappable) {
obj_priv->pin_mappable = false;
i915_gem_info_update_mappable(dev_priv, obj, false);
}
} }
int int
...@@ -188,6 +240,7 @@ int i915_gem_do_init(struct drm_device *dev, ...@@ -188,6 +240,7 @@ int i915_gem_do_init(struct drm_device *dev,
end - start); end - start);
dev_priv->mm.gtt_total = end - start; dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
dev_priv->mm.gtt_mappable_end = mappable_end; dev_priv->mm.gtt_mappable_end = mappable_end;
return 0; return 0;
...@@ -1266,6 +1319,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1266,6 +1319,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */ /* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
if (!i915_gem_object_cpu_accessible(obj_priv)) if (!i915_gem_object_cpu_accessible(obj_priv))
i915_gem_object_unbind(obj); i915_gem_object_unbind(obj);
...@@ -1279,6 +1333,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1279,6 +1333,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock; goto unlock;
} }
if (!obj_priv->fault_mappable) {
obj_priv->fault_mappable = true;
i915_gem_info_update_mappable(dev_priv, obj, true);
}
/* Need a new fence register? */ /* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) { if (obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj, true); ret = i915_gem_object_get_fence_reg(obj, true);
...@@ -1396,11 +1455,17 @@ void ...@@ -1396,11 +1455,17 @@ void
i915_gem_release_mmap(struct drm_gem_object *obj) i915_gem_release_mmap(struct drm_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (dev->dev_mapping) if (dev->dev_mapping)
unmap_mapping_range(dev->dev_mapping, unmap_mapping_range(dev->dev_mapping,
obj_priv->mmap_offset, obj->size, 1); obj_priv->mmap_offset, obj->size, 1);
if (obj_priv->fault_mappable) {
obj_priv->fault_mappable = false;
i915_gem_info_update_mappable(dev_priv, obj, false);
}
} }
static void static void
...@@ -2177,7 +2242,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) ...@@ -2177,7 +2242,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
i915_gem_object_put_pages(obj); i915_gem_object_put_pages(obj);
BUG_ON(obj_priv->pages_refcount); BUG_ON(obj_priv->pages_refcount);
i915_gem_info_remove_gtt(dev_priv, obj->size); i915_gem_info_remove_gtt(dev_priv, obj);
list_del_init(&obj_priv->mm_list); list_del_init(&obj_priv->mm_list);
drm_mm_put_block(obj_priv->gtt_space); drm_mm_put_block(obj_priv->gtt_space);
...@@ -2763,9 +2828,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2763,9 +2828,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
goto search_free; goto search_free;
} }
obj_priv->gtt_offset = obj_priv->gtt_space->start;
/* keep track of bounds object by adding it to the inactive list */ /* keep track of bounds object by adding it to the inactive list */
list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
i915_gem_info_add_gtt(dev_priv, obj->size); i915_gem_info_add_gtt(dev_priv, obj);
/* Assert that the object is not currently in any GPU domain. As it /* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in * wasn't in the GTT, there shouldn't be any way it could have been in
...@@ -2774,7 +2841,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2774,7 +2841,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
obj_priv->gtt_offset = obj_priv->gtt_space->start;
trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable);
return 0; return 0;
...@@ -4107,11 +4173,12 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, ...@@ -4107,11 +4173,12 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
* remove it from the inactive list * remove it from the inactive list
*/ */
if (obj_priv->pin_count == 1) { if (obj_priv->pin_count == 1) {
i915_gem_info_add_pin(dev_priv, obj->size); i915_gem_info_add_pin(dev_priv, obj, mappable);
if (!obj_priv->active) if (!obj_priv->active)
list_move_tail(&obj_priv->mm_list, list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.pinned_list); &dev_priv->mm.pinned_list);
} }
BUG_ON(!obj_priv->pin_mappable && mappable);
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
return 0; return 0;
...@@ -4137,7 +4204,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) ...@@ -4137,7 +4204,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
if (!obj_priv->active) if (!obj_priv->active)
list_move_tail(&obj_priv->mm_list, list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.inactive_list); &dev_priv->mm.inactive_list);
i915_gem_info_remove_pin(dev_priv, obj->size); i915_gem_info_remove_pin(dev_priv, obj);
} }
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment