Commit cb1c8146 authored by Christian König

drm/ttm: flip the switch for driver allocated resources v2

Instead of both the driver and TTM allocating memory, finalize embedding the
ttm_resource object as base into the driver backends.

v2: fix typo in vmwgfx grid mgr and double init in amdgpu_vram_mgr.c
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-10-christian.koenig@amd.com
parent d3bcb4b0
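
The core idiom this patch completes: a driver's node type embeds struct ttm_resource as its first member ("base") and recovers its own type with container_of(), so TTM core and driver no longer make two separate allocations joined by the old ttm_resource::mm_node void pointer. A minimal sketch of the pattern, mirroring the ttm_range_mgr_node type that the diff below switches over:

	#include <linux/kernel.h>	/* container_of() */
	#include <drm/drm_mm.h>
	#include <drm/ttm/ttm_resource.h>

	/* Driver-side node: the TTM resource object is embedded as "base",
	 * followed by however many drm_mm_node entries the driver needs. */
	struct ttm_range_mgr_node {
		struct ttm_resource base;
		struct drm_mm_node mm_nodes[];
	};

	/* Downcast from the core object handed to the manager callbacks. */
	static inline struct ttm_range_mgr_node *
	to_ttm_range_mgr_node(struct ttm_resource *res)
	{
		return container_of(res, struct ttm_range_mgr_node, base);
	}

The manager's alloc callback then returns the embedded base through the new double-pointer interface (*res = &node->base), and its free callback kfree()s the whole container.
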
@@ -40,8 +40,7 @@ to_gtt_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node,
-			    base.mm_nodes[0]);
+	return container_of(res, struct amdgpu_gtt_node, base.base);
 }
 
 /**
@@ -102,13 +101,13 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
 /**
  * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
- * @mem: the mem object to check
+ * @res: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 
 	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
@@ -126,19 +125,20 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 			      struct ttm_buffer_object *tbo,
 			      const struct ttm_place *place,
-			      struct ttm_resource *mem)
+			      struct ttm_resource **res)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	uint32_t num_pages = PFN_UP(tbo->base.size);
 	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
-	    atomic64_read(&mgr->available) < mem->num_pages) {
+	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+	    atomic64_read(&mgr->available) < num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
 	}
-	atomic64_sub(mem->num_pages, &mgr->available);
+	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
@@ -154,29 +154,28 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		spin_lock(&mgr->lock);
 		r = drm_mm_insert_node_in_range(&mgr->mm,
 						&node->base.mm_nodes[0],
-						mem->num_pages,
-						tbo->page_alignment, 0,
-						place->fpfn, place->lpfn,
+						num_pages, tbo->page_alignment,
+						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
 		spin_unlock(&mgr->lock);
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->base.mm_nodes[0].start;
+		node->base.base.start = node->base.mm_nodes[0].start;
 	} else {
 		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = mem->num_pages;
-		mem->start = AMDGPU_BO_INVALID_OFFSET;
+		node->base.mm_nodes[0].size = node->base.base.num_pages;
+		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->base.mm_nodes[0];
+	*res = &node->base.base;
 	return 0;
 
 err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -190,21 +189,16 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated GTT again.
  */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_amdgpu_gtt_node(mem);
 
 	spin_lock(&mgr->lock);
 	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(res->num_pages, &mgr->available);
 
 	kfree(node);
 }
......
@@ -1296,7 +1296,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
......
@@ -28,6 +28,7 @@
 
 #include <drm/drm_mm.h>
 #include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
 	struct drm_mm_node *node;
 
-	if (!res || !res->mm_node) {
+	if (!res) {
 		cur->start = start;
 		cur->size = size;
 		cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-	node = res->mm_node;
+	node = to_ttm_range_mgr_node(res)->mm_nodes;
 	while (start >= node->size << PAGE_SHIFT)
 		start -= node++->size << PAGE_SHIFT;
......
@@ -219,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = bo->tbo.resource;
-	struct drm_mm_node *nodes = mem->mm_node;
-	unsigned pages = mem->num_pages;
+	struct ttm_resource *res = bo->tbo.resource;
+	unsigned pages = res->num_pages;
+	struct drm_mm_node *mm;
 	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
 
-	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;
 
-	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
-		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+	for (usage = 0; pages; pages -= mm->size, mm++)
+		usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
 	return usage;
 }
@@ -367,7 +368,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
@@ -388,7 +389,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
 	/* bail out quickly if there's likely not enough VRAM for this BO */
-	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	mem_bytes = tbo->base.size;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		r = -ENOSPC;
 		goto error_sub;
@@ -406,7 +407,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 #endif
 		pages_per_node = max_t(uint32_t, pages_per_node,
 				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+		num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
 	}
 
 	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
@@ -422,8 +423,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
-	mem->start = 0;
-	pages_left = mem->num_pages;
+	pages_left = node->base.num_pages;
 
 	/* Limit maximum size to 2GB due to SG table limitations */
 	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -451,7 +451,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		}
 
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
-		amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
+		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
 		pages_left -= pages;
 		++i;
@@ -461,10 +461,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (i == 1)
-		mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
 	atomic64_add(vis_usage, &mgr->vis_usage);
-	mem->mm_node = &node->mm_nodes[0];
+	*res = &node->base;
 	return 0;
 
 error_free:
@@ -487,28 +487,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
  * Free the allocated VRAM again.
  */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-				struct ttm_resource *mem)
+				struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct ttm_range_mgr_node *node;
 	uint64_t usage = 0, vis_usage = 0;
-	unsigned pages = mem->num_pages;
-	struct drm_mm_node *nodes;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
-	nodes = &node->mm_nodes[0];
+	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
-	while (pages) {
-		pages -= nodes->size;
-		drm_mm_remove_node(nodes);
-		usage += nodes->size << PAGE_SHIFT;
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
-		++nodes;
+	for (i = 0, pages = res->num_pages; pages;
+	     pages -= node->mm_nodes[i].size, ++i) {
+		struct drm_mm_node *mm = &node->mm_nodes[i];
+
+		drm_mm_remove_node(mm);
+		usage += mm->size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
@@ -533,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
  * Allocate and fill a sg table from a VRAM allocation.
  */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-			      struct ttm_resource *mem,
+			      struct ttm_resource *res,
 			      u64 offset, u64 length,
 			      struct device *dev,
 			      enum dma_data_direction dir,
@@ -549,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	/* Determine the number of DRM_MM nodes to export */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
 		amdgpu_res_next(&cursor, cursor.size);
@@ -569,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	 * and the number of bytes from it. Access the following
 	 * DRM_MM node(s) if more buffer needs to exported
 	 */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
 		size_t size = cursor.size;
......
@@ -250,7 +250,8 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource ||
+			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
 		return 0;
 
 	return gbo->bo.resource->start;
......
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		}
 	}
 
-	if (new_reg) {
-		if (new_reg->mm_node)
-			nvbo->offset = (new_reg->start << PAGE_SHIFT);
-		else
-			nvbo->offset = 0;
-	}
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
 }
......
@@ -178,25 +178,24 @@ void
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
-	if (!mem)
-		return;
+
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
+
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
-	reg->mm_node = mem;
+
+	*res = &mem->base;
 	return 0;
 }
@@ -6,12 +6,6 @@ struct ttm_tt;
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
 	struct ttm_resource base;
 	struct nouveau_cli *cli;
@@ -21,8 +15,14 @@ struct nouveau_mem {
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
......
@@ -45,7 +45,7 @@ static int
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -54,15 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	ttm_resource_init(bo, place, *res);
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
@@ -78,18 +78,18 @@ static int
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
@@ -102,27 +102,27 @@ static int
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
......
@@ -58,7 +58,7 @@ to_range_manager(struct ttm_resource_manager *man)
 static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *bo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
 	struct ttm_range_mgr_node *node;
@@ -83,37 +83,30 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
-	if (unlikely(ret)) {
+	if (unlikely(ret))
 		kfree(node);
-	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
-	}
+	else
+		node->base.start = node->mm_nodes[0].start;
 
 	return ret;
 }
 
 static void ttm_range_man_free(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct ttm_range_manager *rman = to_range_manager(man);
-	struct ttm_range_mgr_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
 	kfree(node);
-	mem->mm_node = NULL;
 }
 
 static void ttm_range_man_debug(struct ttm_resource_manager *man,
......
@@ -29,7 +29,6 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource *res)
 {
-	res->mm_node = NULL;
 	res->start = 0;
 	res->num_pages = PFN_UP(bo->base.size);
 	res->mem_type = place->mem_type;
@@ -47,22 +46,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 {
 	struct ttm_resource_manager *man =
 		ttm_manager_type(bo->bdev, place->mem_type);
-	struct ttm_resource *res;
-	int r;
-
-	res = kmalloc(sizeof(*res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	ttm_resource_init(bo, place, res);
-	r = man->func->alloc(man, bo, place, res);
-	if (r) {
-		kfree(res);
-		return r;
-	}
 
-	*res_ptr = res;
-	return 0;
+	return man->func->alloc(man, bo, place, res_ptr);
 }
 
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -74,7 +59,6 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
 	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
 	man->func->free(man, *res);
-	kfree(*res);
 	*res = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_free);
......
@@ -10,20 +10,20 @@
 static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 			     struct ttm_buffer_object *bo,
 			     const struct ttm_place *place,
-			     struct ttm_resource *mem)
+			     struct ttm_resource **res)
 {
-	mem->mm_node = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 	return 0;
 }
 
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func ttm_sys_manager_func = {
......
@@ -52,16 +52,16 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma
 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
-	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kmalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
@@ -70,34 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += mem->num_pages;
+		gman->used_gmr_pages += (*res)->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
-	mem->mm_node = gman;
-	mem->start = id;
+	(*res)->start = id;
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= (*res)->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
+	kfree(*res);
 	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	ida_free(&gman->gmr_ida, mem->start);
+	ida_free(&gman->gmr_ida, res->start);
 	spin_lock(&gman->lock);
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= res->num_pages;
 	spin_unlock(&gman->lock);
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
......
@@ -51,7 +51,7 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
 static int vmw_thp_get_node(struct ttm_resource_manager *man,
 			    struct ttm_buffer_object *bo,
 			    const struct ttm_place *place,
-			    struct ttm_resource *mem)
+			    struct ttm_resource **res)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
@@ -78,26 +78,27 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	spin_lock(&rman->lock);
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
-		if (mem->num_pages >= align_pages) {
+		if (node->base.num_pages >= align_pages) {
 			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-						     align_pages, place, mem,
-						     lpfn, mode);
+						     align_pages, place,
+						     &node->base, lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
-	if (mem->num_pages >= align_pages) {
+	if (node->base.num_pages >= align_pages) {
 		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-					     align_pages, place, mem, lpfn,
-					     mode);
+					     align_pages, place, &node->base,
+					     lpfn, mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
@@ -105,20 +106,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
+		*res = &node->base;
 	}
 
 	return ret;
 }
 
 static void vmw_thp_put_node(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct vmw_thp_manager *rman = to_thp_manager(man);
-	struct ttm_range_mgr_node *node = mem->mm_node;
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
......
@@ -30,8 +30,7 @@ struct ttm_range_mgr_node {
 static inline struct ttm_range_mgr_node *
 to_ttm_range_mgr_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct ttm_range_mgr_node,
-			    mm_nodes[0]);
+	return container_of(res, struct ttm_range_mgr_node, base);
 }
 
 int ttm_range_man_init(struct ttm_device *bdev,
......
@@ -45,46 +45,38 @@ struct ttm_resource_manager_func {
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
-	 * @placement: Placement details.
-	 * @flags: Additional placement flags.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @place: Placement details.
+	 * @res: Resulting pointer to the ttm_resource.
	 *
	 * This function should allocate space in the memory type managed
-	 * by @man. Placement details if
-	 * applicable are given by @placement. If successful,
-	 * @mem::mm_node should be set to a non-null value, and
-	 * @mem::start should be set to a value identifying the beginning
+	 * by @man. Placement details if applicable are given by @place. If
+	 * successful, a filled in ttm_resource object should be returned in
+	 * @res. @res::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
-	 * If the memory region accommodate the buffer object, @mem::mm_node
-	 * should be set to NULL, and the function should return 0.
+	 * If the manager can't fulfill the request -ENOSPC should be returned.
	 * If a system error occurred, preventing the request to be fulfilled,
	 * the function should return a negative error code.
	 *
-	 * Note that @mem::mm_node will only be dereferenced by
-	 * struct ttm_resource_manager functions and optionally by the driver,
-	 * which has knowledge of the underlying type.
-	 *
-	 * This function may not be called from within atomic context, so
-	 * an implementation can and must use either a mutex or a spinlock to
-	 * protect any data structures managing the space.
+	 * This function may not be called from within atomic context and needs
+	 * to take care of its own locking to protect any data structures
+	 * managing the space.
	 */
	int  (*alloc)(struct ttm_resource_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
-		      struct ttm_resource *mem);
+		      struct ttm_resource **res);

	/**
	 * struct ttm_resource_manager_func member free
	 *
	 * @man: Pointer to a memory type manager.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @res: Pointer to a struct ttm_resource to be freed.
	 *
-	 * This function frees memory type resources previously allocated
-	 * and that are identified by @mem::mm_node and @mem::start. May not
-	 * be called from within atomic context.
+	 * This function frees memory type resources previously allocated.
+	 * May not be called from within atomic context.
	 */
	void (*free)(struct ttm_resource_manager *man,
-		     struct ttm_resource *mem);
+		     struct ttm_resource *res);

	/**
	 * struct ttm_resource_manager_func member debug
@@ -158,9 +150,9 @@ struct ttm_bus_placement {
 /**
  * struct ttm_resource
  *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
  *
@@ -168,7 +160,6 @@ struct ttm_bus_placement {
  * buffer object.
  */
 struct ttm_resource {
-	void *mm_node;
	unsigned long start;
	unsigned long num_pages;
	uint32_t mem_type;
......
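
Putting the new contract together: after this patch a resource manager owns the full lifecycle of the ttm_resource object, since ttm_resource_alloc() no longer kmalloc()s or initializes it and ttm_resource_free() no longer kfree()s it. A hedged sketch of a minimal manager following the new alloc/free interface, mirroring the ttm_sys_man and vmwgfx gmrid changes above (my_manager_node and the my_man_* names are hypothetical, not from the patch):

	/* Hypothetical driver node; the embedded base must be recoverable
	 * with container_of() in the free callback. */
	struct my_manager_node {
		struct ttm_resource base;
		/* driver-private bookkeeping would go here */
	};

	static int my_man_alloc(struct ttm_resource_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource **res)
	{
		struct my_manager_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		/* The manager now calls ttm_resource_init() itself; the
		 * core no longer pre-allocates or pre-initializes *res. */
		ttm_resource_init(bo, place, &node->base);

		*res = &node->base;
		return 0;
	}

	static void my_man_free(struct ttm_resource_manager *man,
				struct ttm_resource *res)
	{
		/* The manager frees its own container; the core no longer
		 * kfree()s *res after calling this. */
		kfree(container_of(res, struct my_manager_node, base));
	}

	static const struct ttm_resource_manager_func my_man_func = {
		.alloc = my_man_alloc,
		.free = my_man_free,
	};
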