Commit ca262a99 authored by Jerome Glisse's avatar Jerome Glisse Committed by Dave Airlie

drm/ttm: Rework validation & memory space allocation (V3)

This change allows drivers to pass a sorted list of memory placements,
from most preferred placement to least preferred placement.
In order to avoid long function prototypes, a structure is
used to gather memory placement information such as range
restriction (if you need a buffer to be in a given range).
Range restriction is determined by fpfn & lpfn, which are
the first page and last page number between which allocation
can happen. If those fields are set to 0, ttm will assume the
buffer can be put anywhere in the address space (thus it
avoids putting a burden on the driver to always properly
set those fields).

This patch also factors out a few functions, such as evicting the
first entry of the LRU list or getting a memory space. This avoids
code duplication.

V2: Change API to use placement flags and array instead
    of packing placement order into a quadword.
V3: Make sure we set the appropriate mem.placement flag
    when validating or allocating memory space.

[Pending Thomas Hellstrom further review but okay
from preliminary review so far].
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent a2e68e92
...@@ -27,6 +27,14 @@ ...@@ -27,6 +27,14 @@
/* /*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/ */
/* Notes:
*
* We store bo pointer in drm_mm_node struct so we know which bo own a
* specific node. There is no protection on the pointer, thus to make
* sure things don't go berserk you have to access this pointer while
* holding the global lru lock and make sure anytime you free a node you
* reset the pointer to NULL.
*/
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h" #include "ttm/ttm_bo_driver.h"
...@@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve); ...@@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/* /*
* Call bo->mutex locked. * Call bo->mutex locked.
*/ */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
...@@ -329,14 +336,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ...@@ -329,14 +336,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
} }
if (bo->mem.mem_type == TTM_PL_SYSTEM) { if (bo->mem.mem_type == TTM_PL_SYSTEM) {
bo->mem = *mem;
struct ttm_mem_reg *old_mem = &bo->mem;
uint32_t save_flags = old_mem->placement;
*old_mem = *mem;
mem->mm_node = NULL; mem->mm_node = NULL;
ttm_flag_masked(&save_flags, mem->placement,
TTM_PL_MASK_MEMTYPE);
goto moved; goto moved;
} }
...@@ -419,6 +420,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) ...@@ -419,6 +420,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
kref_put(&bo->list_kref, ttm_bo_ref_bug); kref_put(&bo->list_kref, ttm_bo_ref_bug);
} }
if (bo->mem.mm_node) { if (bo->mem.mm_node) {
bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node); drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL; bo->mem.mm_node = NULL;
} }
...@@ -555,17 +557,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) ...@@ -555,17 +557,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
} }
EXPORT_SYMBOL(ttm_bo_unref); EXPORT_SYMBOL(ttm_bo_unref);
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
bool interruptible, bool no_wait) bool no_wait)
{ {
int ret = 0;
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob; struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem; struct ttm_mem_reg evict_mem;
uint32_t proposed_placement; struct ttm_placement placement;
int ret = 0;
if (bo->mem.mem_type != mem_type)
goto out;
spin_lock(&bo->lock); spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait); ret = ttm_bo_wait(bo, false, interruptible, no_wait);
...@@ -585,14 +584,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, ...@@ -585,14 +584,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem; evict_mem = bo->mem;
evict_mem.mm_node = NULL; evict_mem.mm_node = NULL;
proposed_placement = bdev->driver->evict_flags(bo); bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
ret = ttm_bo_mem_space(bo, proposed_placement, no_wait);
&evict_mem, interruptible, no_wait);
if (unlikely(ret != 0 && ret != -ERESTART))
ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
&evict_mem, interruptible, no_wait);
if (ret) { if (ret) {
if (ret != -ERESTART) if (ret != -ERESTART)
printk(KERN_ERR TTM_PFX printk(KERN_ERR TTM_PFX
...@@ -606,95 +600,117 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, ...@@ -606,95 +600,117 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
if (ret) { if (ret) {
if (ret != -ERESTART) if (ret != -ERESTART)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
goto out;
}
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
if (evict_mem.mm_node) { if (evict_mem.mm_node) {
evict_mem.mm_node->private = NULL;
drm_mm_put_block(evict_mem.mm_node); drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL; evict_mem.mm_node = NULL;
} }
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
goto out;
}
bo->evicted = true; bo->evicted = true;
out: out:
return ret; return ret;
} }
/** static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem,
uint32_t mem_type, uint32_t mem_type,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
struct ttm_bo_global *glob = bdev->glob; struct ttm_bo_global *glob = bdev->glob;
struct drm_mm_node *node;
struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct list_head *lru; struct ttm_buffer_object *bo;
unsigned long num_pages = mem->num_pages; int ret, put_count = 0;
int put_count = 0;
int ret;
retry_pre_get:
ret = drm_mm_pre_get(&man->manager);
if (unlikely(ret != 0))
return ret;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
do { bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
node = drm_mm_search_free(&man->manager, num_pages, kref_get(&bo->list_kref);
mem->page_alignment, 1); ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
if (node)
break;
lru = &man->lru;
if (list_empty(lru))
break;
entry = list_first_entry(lru, struct ttm_buffer_object, lru);
kref_get(&entry->list_kref);
ret =
ttm_bo_reserve_locked(entry, interruptible, no_wait,
false, 0);
if (likely(ret == 0)) if (likely(ret == 0))
put_count = ttm_bo_del_from_lru(entry); put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
while (put_count--) while (put_count--)
kref_put(&entry->list_kref, ttm_bo_ref_bug); kref_put(&bo->list_kref, ttm_bo_ref_bug);
ret = ttm_bo_evict(bo, interruptible, no_wait);
ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
ttm_bo_unreserve(entry); static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
struct drm_mm_node **node)
{
struct ttm_bo_global *glob = bo->glob;
unsigned long lpfn;
int ret;
kref_put(&entry->list_kref, ttm_bo_release_list); lpfn = placement->lpfn;
if (ret) if (!lpfn)
lpfn = man->size;
*node = NULL;
do {
ret = drm_mm_pre_get(&man->manager);
if (unlikely(ret))
return ret; return ret;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
} while (1); *node = drm_mm_search_free_in_range(&man->manager,
mem->num_pages, mem->page_alignment,
if (!node) { placement->fpfn, lpfn, 1);
if (unlikely(*node == NULL)) {
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
return -ENOMEM; return 0;
} }
*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
mem->page_alignment,
placement->fpfn,
lpfn);
spin_unlock(&glob->lru_lock);
} while (*node == NULL);
return 0;
}
node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); /**
if (unlikely(!node)) { * Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct drm_mm_node *node;
int ret;
do {
ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
if (unlikely(ret != 0))
return ret;
if (node)
break;
spin_lock(&glob->lru_lock);
if (list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
goto retry_pre_get; break;
} }
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
no_wait);
if (unlikely(ret != 0))
return ret;
} while (1);
if (node == NULL)
return -ENOMEM;
mem->mm_node = node; mem->mm_node = node;
mem->mem_type = mem_type; mem->mem_type = mem_type;
return 0; return 0;
...@@ -725,7 +741,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, ...@@ -725,7 +741,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result; return result;
} }
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed, bool disallow_fixed,
uint32_t mem_type, uint32_t mem_type,
...@@ -749,6 +764,18 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, ...@@ -749,6 +764,18 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
return true; return true;
} }
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
int i;
for (i = 0; i <= TTM_PL_PRIV5; i++)
if (flags & (1 << i)) {
*mem_type = i;
return 0;
}
return -EINVAL;
}
/** /**
* Creates space for memory region @mem according to its type. * Creates space for memory region @mem according to its type.
* *
...@@ -758,33 +785,32 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, ...@@ -758,33 +785,32 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space. * space.
*/ */
int ttm_bo_mem_space(struct ttm_buffer_object *bo, int ttm_bo_mem_space(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
struct ttm_mem_reg *mem, struct ttm_mem_reg *mem,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man; struct ttm_mem_type_manager *man;
uint32_t num_prios = bdev->driver->num_mem_type_prio;
const uint32_t *prios = bdev->driver->mem_type_prio;
uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM; uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0; uint32_t cur_flags = 0;
bool type_found = false; bool type_found = false;
bool type_ok = false; bool type_ok = false;
bool has_eagain = false; bool has_eagain = false;
struct drm_mm_node *node = NULL; struct drm_mm_node *node = NULL;
int ret; int i, ret;
mem->mm_node = NULL; mem->mm_node = NULL;
for (i = 0; i < num_prios; ++i) { for (i = 0; i <= placement->num_placement; ++i) {
mem_type = prios[i]; ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type]; man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man, type_ok = ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user, bo->type == ttm_bo_type_user,
mem_type, proposed_placement, mem_type,
placement->placement[i],
&cur_flags); &cur_flags);
if (!type_ok) if (!type_ok)
...@@ -792,32 +818,22 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -792,32 +818,22 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags); cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM) if (mem_type == TTM_PL_SYSTEM)
break; break;
if (man->has_type && man->use_type) { if (man->has_type && man->use_type) {
type_found = true; type_found = true;
do { ret = ttm_bo_man_get_node(bo, man, placement, mem,
ret = drm_mm_pre_get(&man->manager); &node);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
spin_lock(&glob->lru_lock);
node = drm_mm_search_free(&man->manager,
mem->num_pages,
mem->page_alignment,
1);
if (unlikely(!node)) {
spin_unlock(&glob->lru_lock);
break;
}
node = drm_mm_get_block_atomic(node,
mem->num_pages,
mem->
page_alignment);
spin_unlock(&glob->lru_lock);
} while (!node);
} }
if (node) if (node)
break; break;
...@@ -827,43 +843,48 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -827,43 +843,48 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node; mem->mm_node = node;
mem->mem_type = mem_type; mem->mem_type = mem_type;
mem->placement = cur_flags; mem->placement = cur_flags;
if (node)
node->private = bo;
return 0; return 0;
} }
if (!type_found) if (!type_found)
return -EINVAL; return -EINVAL;
num_prios = bdev->driver->num_mem_busy_prio; for (i = 0; i <= placement->num_busy_placement; ++i) {
prios = bdev->driver->mem_busy_prio; ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
for (i = 0; i < num_prios; ++i) { if (ret)
mem_type = prios[i]; return ret;
man = &bdev->man[mem_type]; man = &bdev->man[mem_type];
if (!man->has_type) if (!man->has_type)
continue; continue;
if (!ttm_bo_mt_compatible(man, if (!ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user, bo->type == ttm_bo_type_user,
mem_type, mem_type,
proposed_placement, &cur_flags)) placement->placement[i],
&cur_flags))
continue; continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags); cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
ret = ttm_bo_mem_force_space(bdev, mem, mem_type, ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait); interruptible, no_wait);
if (ret == 0 && mem->mm_node) { if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags; mem->placement = cur_flags;
mem->mm_node->private = bo;
return 0; return 0;
} }
if (ret == -ERESTART) if (ret == -ERESTART)
has_eagain = true; has_eagain = true;
} }
ret = (has_eagain) ? -ERESTART : -ENOMEM; ret = (has_eagain) ? -ERESTART : -ENOMEM;
return ret; return ret;
} }
...@@ -886,7 +907,7 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) ...@@ -886,7 +907,7 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
} }
int ttm_bo_move_buffer(struct ttm_buffer_object *bo, int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
struct ttm_bo_global *glob = bo->glob; struct ttm_bo_global *glob = bo->glob;
...@@ -900,101 +921,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ...@@ -900,101 +921,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary, * Have the driver move function wait for idle when necessary,
* instead of doing it here. * instead of doing it here.
*/ */
spin_lock(&bo->lock); spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait); ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock); spin_unlock(&bo->lock);
if (ret) if (ret)
return ret; return ret;
mem.num_pages = bo->num_pages; mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT; mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment; mem.page_alignment = bo->mem.page_alignment;
/* /*
* Determine where to move the buffer. * Determine where to move the buffer.
*/ */
ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
interruptible, no_wait);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock: out_unlock:
if (ret && mem.mm_node) { if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node); drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
} }
return ret; return ret;
} }
static int ttm_bo_mem_compat(uint32_t proposed_placement, static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) int i;
return 0;
if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
return 0;
return 1; for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
TTM_PL_MASK_CACHING) &&
(placement->placement[i] & mem->placement &
TTM_PL_MASK_MEM))
return i;
}
return -1;
} }
int ttm_buffer_object_validate(struct ttm_buffer_object *bo, int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
int ret; int ret;
BUG_ON(!atomic_read(&bo->reserved)); BUG_ON(!atomic_read(&bo->reserved));
bo->proposed_placement = proposed_placement; /* Check that range is valid */
if (placement->lpfn || placement->fpfn)
TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", if (placement->fpfn > placement->lpfn ||
(unsigned long)proposed_placement, (placement->lpfn - placement->fpfn) < bo->num_pages)
(unsigned long)bo->mem.placement); return -EINVAL;
/* /*
* Check whether we need to move buffer. * Check whether we need to move buffer.
*/ */
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { if (ret < 0) {
ret = ttm_bo_move_buffer(bo, bo->proposed_placement, ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
interruptible, no_wait); if (ret)
if (ret) {
if (ret != -ERESTART)
printk(KERN_ERR TTM_PFX
"Failed moving buffer. "
"Proposed placement 0x%08x\n",
bo->proposed_placement);
if (ret == -ENOMEM)
printk(KERN_ERR TTM_PFX
"Out of aperture space or "
"DRM memory quota.\n");
return ret; return ret;
} else {
/*
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
~TTM_PL_MASK_MEMTYPE);
} }
}
/* /*
* We might need to add a TTM. * We might need to add a TTM.
*/ */
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true); ret = ttm_bo_add_ttm(bo, true);
if (ret) if (ret)
return ret; return ret;
} }
/*
* Validation has succeeded, move the access and other
* non-mapping-related flag bits from the proposed flags to
* the active flags
*/
ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
~TTM_PL_MASK_MEMTYPE);
return 0; return 0;
} }
EXPORT_SYMBOL(ttm_buffer_object_validate); EXPORT_SYMBOL(ttm_buffer_object_validate);
...@@ -1042,8 +1044,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, ...@@ -1042,8 +1044,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
size_t acc_size, size_t acc_size,
void (*destroy) (struct ttm_buffer_object *)) void (*destroy) (struct ttm_buffer_object *))
{ {
int ret = 0; int i, c, ret = 0;
unsigned long num_pages; unsigned long num_pages;
uint32_t placements[8];
struct ttm_placement placement;
size += buffer_start & ~PAGE_MASK; size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
...@@ -1100,7 +1104,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, ...@@ -1100,7 +1104,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
goto out_err; goto out_err;
} }
ret = ttm_buffer_object_validate(bo, flags, interruptible, false); placement.fpfn = 0;
placement.lpfn = 0;
for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
if (flags & (1 << i))
placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
placement.placement = placements;
placement.num_placement = c;
placement.busy_placement = placements;
placement.num_busy_placement = c;
ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -1135,8 +1148,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, ...@@ -1135,8 +1148,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo) struct ttm_buffer_object **p_bo)
{ {
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
int ret;
size_t acc_size = size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
...@@ -1161,66 +1174,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, ...@@ -1161,66 +1174,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return ret; return ret;
} }
static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
uint32_t mem_type, bool allow_errors)
{
int ret;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bo->lock);
if (ret && allow_errors)
goto out;
if (bo->mem.mem_type == mem_type)
ret = ttm_bo_evict(bo, mem_type, false, false);
if (ret) {
if (allow_errors) {
goto out;
} else {
ret = 0;
printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
}
}
out:
return ret;
}
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
struct list_head *head,
unsigned mem_type, bool allow_errors) unsigned mem_type, bool allow_errors)
{ {
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob; struct ttm_bo_global *glob = bdev->glob;
struct ttm_buffer_object *entry;
int ret; int ret;
int put_count;
/* /*
* Can't use standard list traversal since we're unlocking. * Can't use standard list traversal since we're unlocking.
*/ */
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
while (!list_empty(head)) {
entry = list_first_entry(head, struct ttm_buffer_object, lru);
kref_get(&entry->list_kref);
ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
put_count = ttm_bo_del_from_lru(entry);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
while (put_count--) ret = ttm_mem_evict_first(bdev, mem_type, false, false);
kref_put(&entry->list_kref, ttm_bo_ref_bug); if (ret) {
BUG_ON(ret); if (allow_errors) {
ret = ttm_bo_leave_list(entry, mem_type, allow_errors); return ret;
ttm_bo_unreserve(entry); } else {
kref_put(&entry->list_kref, ttm_bo_release_list); printk(KERN_ERR TTM_PFX
"Cleanup eviction failed\n");
}
}
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
} }
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
return 0; return 0;
} }
...@@ -1247,7 +1226,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ...@@ -1247,7 +1226,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0; ret = 0;
if (mem_type > 0) { if (mem_type > 0) {
ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager)) if (drm_mm_clean(&man->manager))
...@@ -1280,12 +1259,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) ...@@ -1280,12 +1259,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0; return 0;
} }
return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); return ttm_bo_force_list_clean(bdev, mem_type, true);
} }
EXPORT_SYMBOL(ttm_bo_evict_mm); EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_offset, unsigned long p_size) unsigned long p_size)
{ {
int ret = -EINVAL; int ret = -EINVAL;
struct ttm_mem_type_manager *man; struct ttm_mem_type_manager *man;
...@@ -1315,7 +1294,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, ...@@ -1315,7 +1294,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type); type);
return ret; return ret;
} }
ret = drm_mm_init(&man->manager, p_offset, p_size); ret = drm_mm_init(&man->manager, 0, p_size);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -1464,7 +1443,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, ...@@ -1464,7 +1443,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type. * Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized. * Other types need to be driver / IOCTL initialized.
*/ */
ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_sys; goto out_no_sys;
......
...@@ -44,6 +44,29 @@ struct ttm_bo_device; ...@@ -44,6 +44,29 @@ struct ttm_bo_device;
struct drm_mm_node; struct drm_mm_node;
/**
* struct ttm_placement
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
* @num_placement: number of preferred placements
* @placement: preferred placements
* @num_busy_placement: number of preferred placements when we need to evict a buffer
* @busy_placement: preferred placements when we need to evict a buffer
*
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
unsigned fpfn;
unsigned lpfn;
unsigned num_placement;
const uint32_t *placement;
unsigned num_busy_placement;
const uint32_t *busy_placement;
};
/** /**
* struct ttm_mem_reg * struct ttm_mem_reg
* *
...@@ -109,10 +132,6 @@ struct ttm_tt; ...@@ -109,10 +132,6 @@ struct ttm_tt;
* the object is destroyed. * the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change. * @event_queue: Queue for processes waiting on buffer object status change.
* @lock: spinlock protecting mostly synchronization members. * @lock: spinlock protecting mostly synchronization members.
* @proposed_placement: Proposed placement for the buffer. Changed only by the
* creator prior to validation as opposed to bo->mem.proposed_flags which is
* changed by the implementation prior to a buffer move if it wants to outsmart
* the buffer creator / user. This latter happens, for example, at eviction.
* @mem: structure describing current placement. * @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers * @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member * pinned in physical memory. If this behaviour is not desired, this member
...@@ -177,7 +196,6 @@ struct ttm_buffer_object { ...@@ -177,7 +196,6 @@ struct ttm_buffer_object {
* Members protected by the bo::reserved lock. * Members protected by the bo::reserved lock.
*/ */
uint32_t proposed_placement;
struct ttm_mem_reg mem; struct ttm_mem_reg mem;
struct file *persistant_swap_storage; struct file *persistant_swap_storage;
struct ttm_tt *ttm; struct ttm_tt *ttm;
...@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, ...@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* ttm_buffer_object_validate * ttm_buffer_object_validate
* *
* @bo: The buffer object. * @bo: The buffer object.
* @proposed_placement: Proposed_placement for the buffer object. * @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping. * @interruptible: Sleep interruptible if sleeping.
* @no_wait: Return immediately if the buffer is busy. * @no_wait: Return immediately if the buffer is busy.
* *
* Changes placement and caching policy of the buffer object * Changes placement and caching policy of the buffer object
* according to bo::proposed_flags. * according proposed placement.
* Returns * Returns
* -EINVAL on invalid proposed_flags. * -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition. * -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy. * -EBUSY if no_wait is true and buffer busy.
* -ERESTART if interrupted by a signal. * -ERESTART if interrupted by a signal.
*/ */
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
/** /**
* ttm_bo_unref * ttm_bo_unref
* *
...@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, ...@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
* *
* @bdev: Pointer to a ttm_bo_device struct. * @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type. * @mem_type: The memory type.
* @p_offset: offset for managed area in pages.
* @p_size: size managed area in pages. * @p_size: size managed area in pages.
* *
* Initialize a manager for a given memory type. * Initialize a manager for a given memory type.
...@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, ...@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
*/ */
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_offset, unsigned long p_size); unsigned long p_size);
/** /**
* ttm_bo_clean_mm * ttm_bo_clean_mm
* *
......
...@@ -242,12 +242,6 @@ struct ttm_mem_type_manager { ...@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
/** /**
* struct ttm_bo_driver * struct ttm_bo_driver
* *
* @mem_type_prio: Priority array of memory types to place a buffer object in
* if it fits without evicting buffers from any of these memory types.
* @mem_busy_prio: Priority array of memory types to place a buffer object in
* if it needs to evict buffers to make room.
* @num_mem_type_prio: Number of elements in the @mem_type_prio array.
* @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
* @create_ttm_backend_entry: Callback to create a struct ttm_backend. * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
* @invalidate_caches: Callback to invalidate read caches when a buffer object * @invalidate_caches: Callback to invalidate read caches when a buffer object
* has been evicted. * has been evicted.
...@@ -265,11 +259,6 @@ struct ttm_mem_type_manager { ...@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
*/ */
struct ttm_bo_driver { struct ttm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
/** /**
* struct ttm_bo_driver member create_ttm_backend_entry * struct ttm_bo_driver member create_ttm_backend_entry
* *
...@@ -306,7 +295,8 @@ struct ttm_bo_driver { ...@@ -306,7 +295,8 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags * finished, they'll end up in bo->mem.flags
*/ */
uint32_t(*evict_flags) (struct ttm_buffer_object *bo); void(*evict_flags) (struct ttm_buffer_object *bo,
struct ttm_placement *placement);
/** /**
* struct ttm_bo_driver member move: * struct ttm_bo_driver member move:
* *
...@@ -651,7 +641,7 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, ...@@ -651,7 +641,7 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* -ERESTART: An interruptible sleep was interrupted by a signal. * -ERESTART: An interruptible sleep was interrupted by a signal.
*/ */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
struct ttm_mem_reg *mem, struct ttm_mem_reg *mem,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment