Commit f07069da authored by Christian König

drm/ttm: move memory accounting into vmwgfx v4

This is just another feature that is only used by VMWGFX, so move it
into the driver instead.

I've tried to add the accounting sysfs file to the kobject of the drm
minor, but I'm not 100% sure if this works as expected.

v2: fix typo in KFD and avoid 64bit divide
v3: fix init order in VMWGFX
v4: use pdev sysfs reference instead of drm
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Zack Rusin <zackr@vmware.com> (v3)
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210208133226.36955-2-christian.koenig@amd.com
parent d4bd7776
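
For readers following the API change in the diff below: ttm_bo_init() and ttm_bo_init_reserved() lose their acc_size parameter and ttm_bo_dma_acc_size() goes away, so every driver simply drops that argument. A minimal before/after sketch (hypothetical driver code; bdev, bo, placement, resv and destroy are placeholders, not taken from any one driver):

    /* before: the caller had to precompute an accounting size */
    acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*bo));
    ret = ttm_bo_init_reserved(bdev, &bo->tbo, size, type, &placement,
                               page_align, &ctx, acc_size,
                               NULL /* sg */, resv, destroy);

    /* after: no acc_size; core TTM no longer does per-BO accounting */
    ret = ttm_bo_init_reserved(bdev, &bo->tbo, size, type, &placement,
                               page_align, &ctx,
                               NULL /* sg */, resv, destroy);

Only vmwgfx keeps the accounting, by wrapping its BO creation in ttm_mem_global_alloc()/ttm_mem_global_free() calls (see the vmwgfx hunks further down).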
@@ -118,6 +118,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
  */
 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
 
+static size_t amdgpu_amdkfd_acc_size(uint64_t size)
+{
+        size >>= PAGE_SHIFT;
+        size *= sizeof(dma_addr_t) + sizeof(void *);
+
+        return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
+                __roundup_pow_of_two(sizeof(struct ttm_tt)) +
+                PAGE_ALIGN(size);
+}
+
 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                 uint64_t size, u32 domain, bool sg)
 {

@@ -126,8 +136,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
         int ret = 0;
 
-        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
-                                       sizeof(struct amdgpu_bo));
+        acc_size = amdgpu_amdkfd_acc_size(size);
 
         vram_needed = 0;
         if (domain == AMDGPU_GEM_DOMAIN_GTT) {

@@ -174,8 +183,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
 {
         size_t acc_size;
 
-        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
-                                       sizeof(struct amdgpu_bo));
+        acc_size = amdgpu_amdkfd_acc_size(size);
 
         spin_lock(&kfd_mem_limit.mem_limit_lock);
         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
...
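
A rough worked example of the new amdgpu_amdkfd_acc_size() helper above (assuming a 64-bit build with 4 KiB pages, so sizeof(dma_addr_t) == sizeof(void *) == 8; the struct sizes are left symbolic):

    /* for a 2 MiB allocation, i.e. 512 pages */
    size = 512 * (8 + 8);                                    /* 8192 bytes for the dma_addr_t and page-pointer arrays */
    acc  = __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +  /* BO metadata */
           __roundup_pow_of_two(sizeof(struct ttm_tt)) +     /* TTM backing-store state */
           PAGE_ALIGN(size);                                 /* = 2 pages */

This roughly mirrors what ttm_bo_dma_acc_size() used to compute inside TTM, now kept local to the KFD memory-limit bookkeeping since the TTM helper is gone.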
@@ -523,7 +523,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         };
         struct amdgpu_bo *bo;
         unsigned long page_align, size = bp->size;
-        size_t acc_size;
         int r;
 
         /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */

@@ -546,9 +545,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         *bo_ptr = NULL;
 
-        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
-                                       sizeof(struct amdgpu_bo));
-
         bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
         if (bo == NULL)
                 return -ENOMEM;

@@ -577,8 +573,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         bo->tbo.priority = 1;
 
         r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
-                                 &bo->placement, page_align, &ctx, acc_size,
-                                 NULL, bp->resv, &amdgpu_bo_destroy);
+                                 &bo->placement, page_align, &ctx, NULL,
+                                 bp->resv, &amdgpu_bo_destroy);
         if (unlikely(r != 0))
                 return r;
...
@@ -189,7 +189,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
         struct drm_vram_mm *vmm = dev->vram_mm;
         struct ttm_device *bdev;
         int ret;
-        size_t acc_size;
 
         if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
                 return ERR_PTR(-EINVAL);

@@ -216,7 +215,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
         }
 
         bdev = &vmm->bdev;
-        acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
 
         gbo->bo.bdev = bdev;
         drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);

@@ -226,8 +224,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
          * to release gbo->bo.base and kfree gbo.
          */
         ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
-                          &gbo->placement, pg_align, false, acc_size,
-                          NULL, NULL, ttm_buffer_object_destroy);
+                          &gbo->placement, pg_align, false, NULL, NULL,
+                          ttm_buffer_object_destroy);
         if (ret)
                 return ERR_PTR(ret);
...
@@ -300,18 +300,15 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
                 struct sg_table *sg, struct dma_resv *robj)
 {
         int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
-        size_t acc_size;
         int ret;
 
-        acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
-
         nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
         nouveau_bo_placement_set(nvbo, domain, 0);
         INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
         ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
-                          &nvbo->placement, align >> PAGE_SHIFT, false,
-                          acc_size, sg, robj, nouveau_bo_del_ttm);
+                          &nvbo->placement, align >> PAGE_SHIFT, false, sg,
+                          robj, nouveau_bo_del_ttm);
         if (ret) {
                 /* ttm will call nouveau_bo_del_ttm if it fails.. */
                 return ret;
...
@@ -54,7 +54,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
 
 #include <drm/drm_audio_component.h>
...
@@ -138,8 +138,8 @@ int qxl_bo_create(struct qxl_device *qdev,
         qxl_ttm_placement_from_domain(bo, domain);
 
         r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
-                                 &bo->placement, 0, &ctx, size,
-                                 NULL, NULL, &qxl_ttm_bo_destroy);
+                                 &bo->placement, 0, &ctx, NULL, NULL,
+                                 &qxl_ttm_bo_destroy);
         if (unlikely(r != 0)) {
                 if (r != -ERESTARTSYS)
                         dev_err(qdev->ddev.dev,
...
@@ -159,7 +159,6 @@ int radeon_bo_create(struct radeon_device *rdev,
         struct radeon_bo *bo;
         enum ttm_bo_type type;
         unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-        size_t acc_size;
         int r;
 
         size = ALIGN(size, PAGE_SIZE);

@@ -173,9 +172,6 @@ int radeon_bo_create(struct radeon_device *rdev,
         }
         *bo_ptr = NULL;
 
-        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
-                                       sizeof(struct radeon_bo));
-
         bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
         if (bo == NULL)
                 return -ENOMEM;

@@ -230,8 +226,8 @@ int radeon_bo_create(struct radeon_device *rdev,
         /* Kernel allocation are uninterruptible */
         down_read(&rdev->pm.mclk_lock);
         r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                        &bo->placement, page_align, !kernel, acc_size,
-                        sg, resv, &radeon_ttm_bo_destroy);
+                        &bo->placement, page_align, !kernel, sg, resv,
+                        &radeon_ttm_bo_destroy);
         up_read(&rdev->pm.mclk_lock);
         if (unlikely(r != 0)) {
                 return r;
...
@@ -2,10 +2,9 @@
 #
 # Makefile for the drm device driver.  This driver provides support for the
 
-ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
-        ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-        ttm_execbuf_util.o ttm_range_manager.o \
-        ttm_resource.o ttm_pool.o ttm_device.o
+ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
+        ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
+        ttm_device.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -425,7 +425,6 @@ static void ttm_bo_release(struct kref *kref)
         struct ttm_buffer_object *bo =
             container_of(kref, struct ttm_buffer_object, kref);
         struct ttm_device *bdev = bo->bdev;
-        size_t acc_size = bo->acc_size;
         int ret;
 
         if (!bo->deleted) {

@@ -485,7 +484,6 @@ static void ttm_bo_release(struct kref *kref)
         if (!ttm_bo_uses_embedded_gem_object(bo))
                 dma_resv_fini(&bo->base._resv);
         bo->destroy(bo);
-        ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 void ttm_bo_put(struct ttm_buffer_object *bo)

@@ -1046,25 +1044,13 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
                          struct ttm_placement *placement,
                          uint32_t page_alignment,
                          struct ttm_operation_ctx *ctx,
-                         size_t acc_size,
                          struct sg_table *sg,
                          struct dma_resv *resv,
                          void (*destroy) (struct ttm_buffer_object *))
 {
-        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         bool locked;
         int ret = 0;
 
-        ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
-        if (ret) {
-                pr_err("Out of kernel memory\n");
-                if (destroy)
-                        (*destroy)(bo);
-                else
-                        kfree(bo);
-                return -ENOMEM;
-        }
-
         bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
         kref_init(&bo->kref);

@@ -1081,7 +1067,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
         bo->mem.bus.addr = NULL;
         bo->moving = NULL;
         bo->mem.placement = 0;
-        bo->acc_size = acc_size;
         bo->pin_count = 0;
         bo->sg = sg;
         if (resv) {

@@ -1142,7 +1127,6 @@ int ttm_bo_init(struct ttm_device *bdev,
                 struct ttm_placement *placement,
                 uint32_t page_alignment,
                 bool interruptible,
-                size_t acc_size,
                 struct sg_table *sg,
                 struct dma_resv *resv,
                 void (*destroy) (struct ttm_buffer_object *))

@@ -1151,8 +1135,7 @@ int ttm_bo_init(struct ttm_device *bdev,
         int ret;
 
         ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
-                                   page_alignment, &ctx, acc_size,
-                                   sg, resv, destroy);
+                                   page_alignment, &ctx, sg, resv, destroy);
         if (ret)
                 return ret;

@@ -1163,20 +1146,6 @@ int ttm_bo_init(struct ttm_device *bdev,
 }
 EXPORT_SYMBOL(ttm_bo_init);
 
-size_t ttm_bo_dma_acc_size(struct ttm_device *bdev,
-                           unsigned long bo_size,
-                           unsigned struct_size)
-{
-        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
-        size_t size = 0;
-
-        size += ttm_round_pot(struct_size);
-        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-        size += ttm_round_pot(sizeof(struct ttm_tt));
-        return size;
-}
-EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-
 /*
  * buffer object vm functions.
  */
...
@@ -309,7 +309,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
         kref_init(&fbo->base.kref);
         fbo->base.destroy = &ttm_transfered_destroy;
-        fbo->base.acc_size = 0;
         fbo->base.pin_count = 0;
         if (bo->type != ttm_bo_type_sg)
                 fbo->base.base.resv = &fbo->base.base._resv;
...
@@ -27,9 +27,12 @@
 
 #define pr_fmt(fmt) "[TTM DEVICE] " fmt
 
+#include <linux/mm.h>
+
 #include <drm/ttm/ttm_device.h>
-#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_tt.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo_api.h>
 
 #include "ttm_module.h"

@@ -49,7 +52,9 @@ static void ttm_global_release(void)
         if (--ttm_glob_use_count > 0)
                 goto out;
 
-        ttm_mem_global_release(&ttm_mem_glob);
+        ttm_pool_mgr_fini();
+        ttm_tt_mgr_fini();
+
         __free_page(glob->dummy_read_page);
         memset(glob, 0, sizeof(*glob));
 out:

@@ -59,6 +64,8 @@ static void ttm_global_release(void)
 static int ttm_global_init(void)
 {
         struct ttm_global *glob = &ttm_glob;
+        unsigned long num_pages;
+        struct sysinfo si;
         int ret = 0;
         unsigned i;

@@ -66,9 +73,14 @@ static int ttm_global_init(void)
         if (++ttm_glob_use_count > 1)
                 goto out;
 
-        ret = ttm_mem_global_init(&ttm_mem_glob);
-        if (ret)
-                goto out;
+        si_meminfo(&si);
+
+        /* Limit the number of pages in the pool to about 50% of the total
+         * system memory.
+         */
+        num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
+        ttm_pool_mgr_init(num_pages * 50 / 100);
+        ttm_tt_mgr_init();
 
         spin_lock_init(&glob->lru_lock);
         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
...
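
To make the new pool limit in ttm_global_init() concrete (a back-of-the-envelope example assuming 4 KiB pages): on a machine where si.totalram * si.mem_unit works out to 16 GiB, num_pages is 4194304, so ttm_pool_mgr_init() caps the page pool at roughly 2097152 pages, i.e. about 8 GiB. Previously this limit was derived from the accounting zones (glob->zone_kernel->max_mem / (2 * PAGE_SIZE)), which no longer exist in core TTM once ttm_memory moves out.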
@@ -404,16 +404,10 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                         caching = pages + (1 << order);
                 }
 
-                r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
-                                              (1 << order) * PAGE_SIZE,
-                                              ctx);
-                if (r)
-                        goto error_free_page;
-
                 if (dma_addr) {
                         r = ttm_pool_map(pool, order, p, &dma_addr);
                         if (r)
-                                goto error_global_free;
+                                goto error_free_page;
                 }
 
                 num_pages -= 1 << order;

@@ -427,9 +421,6 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 
         return 0;
 
-error_global_free:
-        ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
-
 error_free_page:
         ttm_pool_free_page(pool, tt->caching, order, p);

@@ -464,8 +455,6 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
                 order = ttm_pool_page_order(pool, p);
                 num_pages = 1ULL << order;
-                ttm_mem_global_free_page(&ttm_mem_glob, p,
-                                         num_pages * PAGE_SIZE);
                 if (tt->dma_address)
                         ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
...
@@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
             vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
             vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
             vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
-            ttm_object.o ttm_lock.o
+            ttm_object.o ttm_lock.o ttm_memory.o
 
 vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
@@ -28,7 +28,6 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_memory.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>

@@ -36,10 +35,11 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
-#include <drm/ttm/ttm_pool.h>
-#include <drm/ttm/ttm_tt.h>
 
-#include "ttm_module.h"
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
+#include "ttm_memory.h"
 
 #define TTM_MEMORY_ALLOC_RETRIES 4

@@ -414,7 +414,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 }
 #endif
 
-int ttm_mem_global_init(struct ttm_mem_global *glob)
+int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
 {
         struct sysinfo si;
         int ret;

@@ -424,8 +424,9 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
         spin_lock_init(&glob->lock);
         glob->swap_queue = create_singlethread_workqueue("ttm_swap");
         INIT_WORK(&glob->work, ttm_shrink_work);
-        ret = kobject_init_and_add(
-                &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
+
+        ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
+                                   &dev->kobj, "memory_accounting");
         if (unlikely(ret != 0)) {
                 kobject_put(&glob->kobj);
                 return ret;

@@ -453,8 +454,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
                 pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
                         zone->name, (unsigned long long)zone->max_mem >> 10);
         }
-        ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
-        ttm_tt_mgr_init();
         return 0;
 out_no_zone:
         ttm_mem_global_release(glob);

@@ -466,10 +465,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
         struct ttm_mem_zone *zone;
         unsigned int i;
 
-        /* let the page allocator first stop the shrink work. */
-        ttm_pool_mgr_fini();
-        ttm_tt_mgr_fini();
-
         flush_workqueue(glob->swap_queue);
         destroy_workqueue(glob->swap_queue);
         glob->swap_queue = NULL;
...
@@ -35,7 +35,8 @@
 #include <linux/errno.h>
 #include <linux/kobject.h>
 #include <linux/mm.h>
-#include "ttm_bo_api.h"
+
+#include <drm/ttm/ttm_bo_api.h>
 
 /**
  * struct ttm_mem_global - Global memory accounting structure.

@@ -79,7 +80,7 @@ extern struct ttm_mem_global {
 #endif
 } ttm_mem_glob;
 
-int ttm_mem_global_init(struct ttm_mem_global *glob);
+int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
 void ttm_mem_global_release(struct ttm_mem_global *glob);
 int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                          struct ttm_operation_ctx *ctx);
...
@@ -43,7 +43,8 @@
 #include <linux/rcupdate.h>
 
 #include <drm/drm_hashtab.h>
-#include <drm/ttm/ttm_memory.h>
+
+#include "ttm_memory.h"
 
 /**
  * enum ttm_ref_type
...
@@ -507,11 +507,16 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
         acc_size = ttm_round_pot(sizeof(*bo));
         acc_size += ttm_round_pot(npages * sizeof(void *));
         acc_size += ttm_round_pot(sizeof(struct ttm_tt));
+
+        ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+        if (unlikely(ret))
+                goto error_free;
+
         ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
                                    ttm_bo_type_device, placement, 0,
-                                   &ctx, acc_size, NULL, NULL, NULL);
+                                   &ctx, NULL, NULL, NULL);
         if (unlikely(ret))
-                goto error_free;
+                goto error_account;
 
         ttm_bo_pin(bo);
         ttm_bo_unreserve(bo);

@@ -519,6 +524,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 
         return 0;
 
+error_account:
+        ttm_mem_global_free(&ttm_mem_glob, acc_size);
+
 error_free:
         kfree(bo);
         return ret;

@@ -558,11 +566,17 @@ int vmw_bo_init(struct vmw_private *dev_priv,
         vmw_bo->base.priority = 3;
         vmw_bo->res_tree = RB_ROOT;
 
+        ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+        if (unlikely(ret))
+                return ret;
+
         ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
                                    ttm_bo_type_device, placement,
-                                   0, &ctx, acc_size, NULL, NULL, bo_free);
-        if (unlikely(ret))
+                                   0, &ctx, NULL, NULL, bo_free);
+        if (unlikely(ret)) {
+                ttm_mem_global_free(&ttm_mem_glob, acc_size);
                 return ret;
+        }
 
         if (pin)
                 ttm_bo_pin(&vmw_bo->base);
...
@@ -1268,6 +1268,7 @@ static void vmw_remove(struct pci_dev *pdev)
 {
         struct drm_device *dev = pci_get_drvdata(pdev);
 
+        ttm_mem_global_release(&ttm_mem_glob);
         drm_dev_unregister(dev);
         vmw_driver_unload(dev);
 }

@@ -1518,6 +1519,10 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         pci_set_drvdata(pdev, &vmw->drm);
 
+        ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
+        if (ret)
+                return ret;
+
         ret = vmw_driver_load(vmw, ent->device);
         if (ret)
                 return ret;
...
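
Note on the v4 change visible in this hunk: since ttm_mem_global_init() is now passed &pdev->dev, the "memory_accounting" kobject (and the zone entries below it) is created under the PCI device's sysfs directory, e.g. somewhere like /sys/bus/pci/devices/<bdf>/memory_accounting (path shown for illustration only), instead of hanging off a TTM- or DRM-owned kobject. The ttm_mem_global_release() added to vmw_remove() is the matching teardown for the init added to vmw_probe().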
@@ -576,11 +576,31 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
                             struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
+        unsigned int i;
+        int ret;
+
         /* TODO: maybe completely drop this ? */
         if (ttm_tt_is_populated(ttm))
                 return 0;
 
-        return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+        ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+        if (ret)
+                return ret;
+
+        for (i = 0; i < ttm->num_pages; ++i) {
+                ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
+                                                PAGE_SIZE, ctx);
+                if (ret)
+                        goto error;
+        }
+        return 0;
+
+error:
+        while (i--)
+                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
+                                         PAGE_SIZE);
+        ttm_pool_free(&bdev->pool, ttm);
+        return ret;
 }
 
 static void vmw_ttm_unpopulate(struct ttm_device *bdev,

@@ -588,6 +608,7 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
         struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                  dma_ttm);
+        unsigned int i;
 
         if (vmw_tt->mob) {
                 vmw_mob_destroy(vmw_tt->mob);

@@ -595,6 +616,11 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
         }
 
         vmw_ttm_unmap_dma(vmw_tt);
+
+        for (i = 0; i < ttm->num_pages; ++i)
+                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
+                                         PAGE_SIZE);
+
         ttm_pool_free(&bdev->pool, ttm);
 }
...
@@ -88,7 +88,6 @@ struct ttm_tt;
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is destroyed or put on the delayed delete list.
  * @mem: structure describing current placement.

@@ -125,7 +124,6 @@ struct ttm_buffer_object {
         struct ttm_device *bdev;
         enum ttm_bo_type type;
         void (*destroy) (struct ttm_buffer_object *);
-        size_t acc_size;
 
         /**
          * Members not needing protection.

@@ -357,10 +355,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                               const struct ttm_place *place);
 
-size_t ttm_bo_dma_acc_size(struct ttm_device *bdev,
-                           unsigned long bo_size,
-                           unsigned struct_size);
-
 /**
  * ttm_bo_init_reserved
  *

@@ -371,7 +365,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_device *bdev,
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
  * @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *

@@ -402,8 +395,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
                          struct ttm_placement *placement,
                          uint32_t page_alignment,
                          struct ttm_operation_ctx *ctx,
-                         size_t acc_size, struct sg_table *sg,
-                         struct dma_resv *resv,
+                         struct sg_table *sg, struct dma_resv *resv,
                          void (*destroy) (struct ttm_buffer_object *));
 
 /**

@@ -421,7 +413,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
  * holds a pointer to a persistent shmem object. Typically, this would
  * point to the shmem object backing a GEM object if TTM is used to back a
  * GEM user interface.
- * @acc_size: Accounted size for this object.
  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *

@@ -446,7 +437,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                 size_t size, enum ttm_bo_type type,
                 struct ttm_placement *placement,
-                uint32_t page_alignment, bool interrubtible, size_t acc_size,
+                uint32_t page_alignment, bool interrubtible,
                 struct sg_table *sg, struct dma_resv *resv,
                 void (*destroy) (struct ttm_buffer_object *));
...
@@ -40,7 +40,6 @@
 #include <drm/ttm/ttm_device.h>
 
 #include "ttm_bo_api.h"
-#include "ttm_memory.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
 #include "ttm_pool.h"
...
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 
 #include <drm/ttm/ttm_caching.h>
 
+struct ttm_bo_device;
 struct ttm_tt;
 struct ttm_resource;
 struct ttm_buffer_object;
...