Commit ff5f643d authored by Donald Robson, committed by Maxime Ripard

drm/imagination: Add GEM and VM related code

Add a GEM implementation based on drm_gem_shmem, and support code for the
PowerVR GPU MMU. The GPU VA manager is used for address space management.
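As a rough sketch of the drm_gem_shmem wiring (shape assumed for illustration; the real pvr_gem_create_object() is defined in pvr_gem.c, whose diff is collapsed below, and the funcs-table name here is a guess):

/* Sketch: allocate the driver wrapper and hand the embedded shmem GEM
 * object back to DRM, which then manages its lifetime.
 */
static const struct drm_gem_object_funcs pvr_gem_object_funcs;

struct drm_gem_object *
pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
{
	struct pvr_gem_object *pvr_obj;

	pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
	if (!pvr_obj)
		return ERR_PTR(-ENOMEM);

	pvr_obj->base.base.funcs = &pvr_gem_object_funcs;

	return &pvr_obj->base.base;
}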

Changes since v8:
- Updated for changes to drm_gpuvm
- Switched to dma_resv locking for vm ops
- Removed linked lists for collecting BOs in vm_context and for freeing
  after ops. This is now done internally in drm_gpuvm
- Corrected license identifiers

Changes since v7:
- kernel-doc fixes
- Remove prefixes from DRM_PVR_BO_* flags
- CREATE_BO ioctl now returns an error if provided size isn't page aligned
- Optimised MMU flushes

Changes since v6:
- Don't initialise kernel_vm_ctx when using MIPS firmware processor
- Rename drm_gpuva_manager uses to drm_gpuvm
- Sync GEM object to device on creation

Changes since v5:
- Use WRITE_ONCE() when writing to page tables
- Add memory barriers to page table insertion
- Fixed double backing page alloc on page table objects
- Fix BO mask checks in DRM_IOCTL_PVR_CREATE_BO handler
- Document use of pvr_page_table_*_idx when preallocing page table objs
- Remove pvr_vm_gpuva_mapping_init()
- Remove NULL check for unmap op in remap function
- Protect gem object with mutex during drm_gpuva_link/unlink
- Defer free or release of page table pages until after TLB flush
- Use drm_gpuva_op_remap_get_unmap_range() helper

Changes since v4:
- Correct sync function in vmap/vunmap function documentation
- Update for upstream GPU VA manager
- Fix missing frees when unmapping drm_gpuva objects
- Always zero GEM BOs on creation

Changes since v3:
- Split MMU and VM code
- Register page table allocations with kmemleak
- Use drm_dev_{enter,exit}

Changes since v2:
- Use GPU VA manager
- Use drm_gem_shmem

Co-developed-by: Matt Coster <matt.coster@imgtec.com>
Signed-off-by: Matt Coster <matt.coster@imgtec.com>
Co-developed-by: Donald Robson <donald.robson@imgtec.com>
Signed-off-by: Donald Robson <donald.robson@imgtec.com>
Signed-off-by: Sarah Walker <sarah.walker@imgtec.com>
Link: https://lore.kernel.org/r/3c96dd170efe759b73897e3675d7310a7c4b06d0.1700668843.git.donald.robson@imgtec.com
Signed-off-by: Maxime Ripard <mripard@kernel.org>
parent f99f5f3e
@@ -7,6 +7,7 @@ config DRM_POWERVR
 	depends on DRM
 	select DRM_GEM_SHMEM_HELPER
 	select DRM_SCHED
+	select DRM_GPUVM
 	select FW_LOADER
 	help
 	  Choose this option if you have a system that has an Imagination
@@ -7,6 +7,9 @@ powervr-y := \
 	pvr_device.o \
 	pvr_device_info.o \
 	pvr_drv.o \
-	pvr_fw.o
+	pvr_fw.o \
+	pvr_gem.o \
+	pvr_mmu.o \
+	pvr_vm.o

 obj-$(CONFIG_DRM_POWERVR) += powervr.o
@@ -6,6 +6,7 @@
 #include "pvr_fw.h"
 #include "pvr_rogue_cr_defs.h"
+#include "pvr_vm.h"

 #include <drm/drm_print.h>
@@ -312,7 +313,30 @@ pvr_device_gpu_init(struct pvr_device *pvr_dev)
 	else
 		return -EINVAL;

-	return pvr_set_dma_info(pvr_dev);
+	err = pvr_set_dma_info(pvr_dev);
+	if (err)
+		return err;
+
+	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
+		if (IS_ERR(pvr_dev->kernel_vm_ctx))
+			return PTR_ERR(pvr_dev->kernel_vm_ctx);
+	}
+
+	return 0;
 }

+/**
+ * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_gpu_fini(struct pvr_device *pvr_dev)
+{
+	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
+		pvr_dev->kernel_vm_ctx = NULL;
+	}
+}
+
 /**
@@ -364,6 +388,7 @@ pvr_device_fini(struct pvr_device *pvr_dev)
 	 * Deinitialization stages are performed in reverse order compared to
 	 * the initialization stages in pvr_device_init().
 	 */
+	pvr_device_gpu_fini(pvr_dev);
 }

 bool
@@ -123,8 +123,24 @@ struct pvr_device {
 	 */
 	struct clk *mem_clk;

+	/**
+	 * @kernel_vm_ctx: Virtual memory context used for kernel mappings.
+	 *
+	 * This is used for mappings in the firmware address region when a
+	 * META firmware processor is in use.
+	 *
+	 * When a MIPS firmware processor is in use, this will be %NULL.
+	 */
+	struct pvr_vm_context *kernel_vm_ctx;
+
 	/** @fw_dev: Firmware related data. */
 	struct pvr_fw_device fw_dev;
+
+	/**
+	 * @mmu_flush_cache_flags: Records which MMU caches require flushing
+	 * before submitting the next job.
+	 */
+	atomic_t mmu_flush_cache_flags;
 };

 /**
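The @mmu_flush_cache_flags documentation implies a lock-free request/consume pattern. A minimal sketch, assuming a hypothetical PVR_MMU_FLUSH_FLAG_PT bit (the real entry points are pvr_mmu_flush_request_all() and pvr_mmu_flush_exec(), declared in pvr_mmu.h below):

/* Assumed flag bit, for illustration only. */
#define PVR_MMU_FLUSH_FLAG_PT BIT(0)

/* Any context may request a flush without holding a lock. */
static void flush_request_sketch(struct pvr_device *pvr_dev)
{
	atomic_fetch_or(PVR_MMU_FLUSH_FLAG_PT, &pvr_dev->mmu_flush_cache_flags);
}

/* Job submission atomically consumes every pending flush request. */
static int flush_take_pending_sketch(struct pvr_device *pvr_dev)
{
	return atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0);
}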
@@ -145,6 +161,14 @@ struct pvr_file {
 	 * to_pvr_device().
 	 */
 	struct pvr_device *pvr_dev;
+
+	/**
+	 * @vm_ctx_handles: Array of VM contexts belonging to this file. Array
+	 * members are of type "struct pvr_vm_context *".
+	 *
+	 * This array is used to allocate handles returned to userspace.
+	 */
+	struct xarray vm_ctx_handles;
 };

 /**
@@ -3,9 +3,12 @@
 #include "pvr_device.h"
 #include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_mmu.h"
 #include "pvr_rogue_defs.h"
 #include "pvr_rogue_fwif_client.h"
 #include "pvr_rogue_fwif_shared.h"
+#include "pvr_vm.h"

 #include <uapi/drm/pvr_drm.h>
@@ -60,7 +63,72 @@ static int
 pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
 		    struct drm_file *file)
 {
-	return -ENOTTY;
+	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	struct pvr_gem_object *pvr_obj;
+	size_t sanitized_size;
+
+	int idx;
+	int err;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	/* All padding fields must be zeroed. */
+	if (args->_padding_c != 0) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	/*
+	 * On 64-bit platforms (our primary target), size_t is a u64. However,
+	 * on other architectures we have to check for overflow when casting
+	 * down to size_t from u64.
+	 *
+	 * We also disallow zero-sized allocations, and reserved (kernel-only)
+	 * flags.
+	 */
+	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
+	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	sanitized_size = (size_t)args->size;
+
+	/*
+	 * Create a buffer object and transfer ownership to a userspace-
+	 * accessible handle.
+	 */
+	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
+	if (IS_ERR(pvr_obj)) {
+		err = PTR_ERR(pvr_obj);
+		goto err_drm_dev_exit;
+	}
+
+	/* This function will not modify &args->handle unless it succeeds. */
+	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
+	if (err)
+		goto err_destroy_obj;
+
+	drm_dev_exit(idx);
+
+	return 0;
+
+err_destroy_obj:
+	/*
+	 * GEM objects are refcounted, so there is no explicit destructor
+	 * function. Instead, we release the singular reference we currently
+	 * hold on the object and let GEM take care of the rest.
+	 */
+	pvr_gem_object_put(pvr_obj);
+
+err_drm_dev_exit:
+	drm_dev_exit(idx);
+
+	return err;
 }

 /**
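From userspace, the calling convention enforced by this handler looks roughly as follows; a sketch assuming the uapi header installs as <drm/pvr_drm.h> and the request macro is DRM_IOCTL_PVR_CREATE_BO (the name used in the v5 changelog above):

#include <sys/ioctl.h>
#include <drm/pvr_drm.h>	/* assumed install path of the uapi header */

static int create_bo(int drm_fd, __u64 size, __u64 flags, __u32 *handle_out)
{
	struct drm_pvr_ioctl_create_bo_args args = {
		.size = size,	/* must be non-zero and device-page aligned */
		.flags = flags,	/* only bits within DRM_PVR_BO_FLAGS_MASK */
	};

	if (ioctl(drm_fd, DRM_IOCTL_PVR_CREATE_BO, &args))
		return -1;	/* errno carries EINVAL, EIO, ... */

	*handle_out = args.handle;
	return 0;
}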
@@ -87,7 +155,61 @@ static int
 pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
 			     struct drm_file *file)
 {
-	return -ENOTTY;
+	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_gem_object *pvr_obj;
+	struct drm_gem_object *gem_obj;
+	int idx;
+	int ret;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	/* All padding fields must be zeroed. */
+	if (args->_padding_4 != 0) {
+		ret = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	/*
+	 * Obtain a kernel reference to the buffer object. This reference is
+	 * counted and must be manually dropped before returning. If a buffer
+	 * object cannot be found for the specified handle, return -%ENOENT (No
+	 * such file or directory).
+	 */
+	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+	if (!pvr_obj) {
+		ret = -ENOENT;
+		goto err_drm_dev_exit;
+	}
+
+	gem_obj = gem_from_pvr_gem(pvr_obj);
+
+	/*
+	 * Allocate a fake offset which can be used in userspace calls to mmap
+	 * on the DRM device file. If this fails, return the error code. This
+	 * operation is idempotent.
+	 */
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret != 0) {
+		/* Drop our reference to the buffer object. */
+		drm_gem_object_put(gem_obj);
+		goto err_drm_dev_exit;
+	}
+
+	/*
+	 * Read out the fake offset allocated by the earlier call to
+	 * drm_gem_create_mmap_offset.
+	 */
+	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+	/* Drop our reference to the buffer object. */
+	pvr_gem_object_put(pvr_obj);
+
+err_drm_dev_exit:
+	drm_dev_exit(idx);
+
+	return ret;
 }

 static __always_inline u64
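The fake offset is then consumed by a plain mmap() on the DRM file descriptor; a userspace sketch (request macro name assumed from the handler name):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/pvr_drm.h>	/* assumed install path of the uapi header */

static void *map_bo(int drm_fd, __u32 handle, size_t size)
{
	struct drm_pvr_ioctl_get_bo_mmap_offset_args args = {
		.handle = handle,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &args))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, args.offset);
}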
@@ -516,10 +638,12 @@ pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
 		break;

 	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
-		return -EINVAL;
+		ret = pvr_heap_info_get(pvr_dev, args);
+		break;

 	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
-		return -EINVAL;
+		ret = pvr_static_data_areas_get(pvr_dev, args);
+		break;
 	}

 	drm_dev_exit(idx);
@@ -666,7 +790,46 @@ static int
 pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
 			    struct drm_file *file)
 {
-	return -ENOTTY;
+	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+	int idx;
+	int err;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	if (args->_padding_4) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
+	if (IS_ERR(vm_ctx)) {
+		err = PTR_ERR(vm_ctx);
+		goto err_drm_dev_exit;
+	}
+
+	/* Allocate object handle for userspace. */
+	err = xa_alloc(&pvr_file->vm_ctx_handles,
+		       &args->handle,
+		       vm_ctx,
+		       xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_cleanup;
+
+	drm_dev_exit(idx);
+
+	return 0;
+
+err_cleanup:
+	pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+	drm_dev_exit(idx);
+
+	return err;
 }

 /**
@@ -686,7 +849,19 @@ static int
 pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
 			     struct drm_file *file)
 {
-	return -ENOTTY;
+	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	pvr_vm_context_put(vm_ctx);
+
+	return 0;
 }

 /**
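Together, the two handlers above give userspace a create/destroy pairing along these lines (sketch; request macro names assumed, includes as in the earlier sketches):

static int vm_ctx_roundtrip(int drm_fd)
{
	struct drm_pvr_ioctl_create_vm_context_args create_args = { 0 };
	struct drm_pvr_ioctl_destroy_vm_context_args destroy_args = { 0 };

	if (ioctl(drm_fd, DRM_IOCTL_PVR_CREATE_VM_CONTEXT, &create_args))
		return -1;

	/* ... VM_MAP / VM_UNMAP against create_args.handle go here ... */

	destroy_args.handle = create_args.handle;
	return ioctl(drm_fd, DRM_IOCTL_PVR_DESTROY_VM_CONTEXT, &destroy_args);
}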
@@ -716,7 +891,79 @@ static int
 pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
 		 struct drm_file *file)
 {
-	return -ENOTTY;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+
+	struct pvr_gem_object *pvr_obj;
+	size_t pvr_obj_size;
+
+	u64 offset_plus_size;
+	int idx;
+	int err;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	/* Initial validation of args. */
+	if (args->_padding_14) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	if (args->flags != 0 ||
+	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
+	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+	if (!pvr_obj) {
+		err = -ENOENT;
+		goto err_put_vm_context;
+	}
+
+	pvr_obj_size = pvr_gem_object_size(pvr_obj);
+
+	/*
+	 * Validate offset and size args. The alignment of these will be
+	 * checked when mapping; for now just check that they're within valid
+	 * bounds
+	 */
+	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
+		err = -EINVAL;
+		goto err_put_pvr_object;
+	}
+
+	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
+			 args->device_addr, args->size);
+	if (err)
+		goto err_put_pvr_object;
+
+	/*
+	 * In order to set up the mapping, we needed a reference to &pvr_obj.
+	 * However, pvr_vm_map() obtains and stores its own reference, so we
+	 * must release ours before returning.
+	 */
+err_put_pvr_object:
+	pvr_gem_object_put(pvr_obj);

+err_put_vm_context:
+	pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+	drm_dev_exit(idx);
+
+	return err;
 }

 /**
@@ -739,7 +986,24 @@ static int
 pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
 		   struct drm_file *file)
 {
-	return -ENOTTY;
+	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+	int err;
+
+	/* Initial validation of args. */
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);
+
+	pvr_vm_context_put(vm_ctx);
+
+	return err;
 }

 /*
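A userspace sketch tying the two mapping ioctls together (request macro names assumed; fields as validated by the handlers above):

static int map_then_unmap(int drm_fd, __u32 vm_ctx_handle, __u32 bo_handle,
			  __u64 device_addr, __u64 size)
{
	struct drm_pvr_ioctl_vm_map_args map_args = {
		.vm_context_handle = vm_ctx_handle,
		.handle = bo_handle,
		.offset = 0,			/* offset into the BO */
		.device_addr = device_addr,	/* must lie within a heap */
		.size = size,
	};
	struct drm_pvr_ioctl_vm_unmap_args unmap_args = {
		.vm_context_handle = vm_ctx_handle,
		.device_addr = device_addr,
		.size = size,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PVR_VM_MAP, &map_args))
		return -1;

	/* ... submit GPU work referencing device_addr ... */

	return ioctl(drm_fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap_args);
}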
@@ -930,6 +1194,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
 	 */
 	pvr_file->pvr_dev = pvr_dev;

+	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);
+
 	/*
 	 * Store reference to powervr-specific file private data in DRM file
 	 * private data.
@@ -955,6 +1221,9 @@ pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
 {
 	struct pvr_file *pvr_file = to_pvr_file(file);

+	/* Drop references on any remaining objects. */
+	pvr_destroy_vm_contexts_for_file(pvr_file);
+
 	kfree(pvr_file);
 	file->driver_priv = NULL;
 }
@@ -962,7 +1231,7 @@ pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
 DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);

 static struct drm_driver pvr_drm_driver = {
-	.driver_features = DRIVER_RENDER,
+	.driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER,
 	.open = pvr_drm_driver_open,
 	.postclose = pvr_drm_driver_postclose,
 	.ioctls = pvr_drm_driver_ioctls,
@@ -976,6 +1245,8 @@ static struct drm_driver pvr_drm_driver = {
 	.minor = PVR_DRIVER_MINOR,
 	.patchlevel = PVR_DRIVER_PATCHLEVEL,

+	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+	.gem_create_object = pvr_gem_create_object,
 };

 static int
This diff is collapsed (pvr_gem.c).
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */
#ifndef PVR_GEM_H
#define PVR_GEM_H
#include "pvr_rogue_heap_config.h"
#include "pvr_rogue_meta.h"
#include <uapi/drm/pvr_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/const.h>
#include <linux/compiler_attributes.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/types.h>
/* Forward declaration from "pvr_device.h". */
struct pvr_device;
struct pvr_file;
/**
* DOC: Flags for DRM_IOCTL_PVR_CREATE_BO (kernel-only)
*
* Kernel-only values allowed in &pvr_gem_object->flags. The majority of options
* for this field are specified in the UAPI header "pvr_drm.h" with a
* DRM_PVR_BO_ prefix. To distinguish these internal options (which must exist
* in ranges marked as "reserved" in the UAPI header), we drop the DRM prefix.
* The public options should be used directly, DRM prefix and all.
*
* To avoid potentially confusing gaps in the UAPI options, these kernel-only
* options are specified "in reverse", starting at bit 63.
*
* We use "reserved" to refer to bits defined here and not exposed in the UAPI.
* Bits not defined anywhere are "undefined".
*
* CPU mapping options
* :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
* flag to override this behaviour and map the object cached.
*
* Firmware options
* :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
* reset. Set this flag to override this behaviour and preserve buffer contents on reset.
*/
#define PVR_BO_CPU_CACHED BIT_ULL(63)
#define PVR_BO_FW_NO_CLEAR_ON_RESET BIT_ULL(62)
#define PVR_BO_KERNEL_FLAGS_MASK (PVR_BO_CPU_CACHED | PVR_BO_FW_NO_CLEAR_ON_RESET)
/* Bits 61..3 are undefined. */
/* Bits 2..0 are defined in the UAPI. */
/* Other utilities. */
#define PVR_BO_UNDEFINED_MASK ~(PVR_BO_KERNEL_FLAGS_MASK | DRM_PVR_BO_FLAGS_MASK)
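
/*
 * Sketch (helper and name assumed, not part of this patch): one intended
 * use of the masks above is to reject any creation flag that is neither a
 * public (DRM_PVR_BO_*) nor a kernel-only (PVR_BO_*) option.
 */
static inline int pvr_bo_flags_sketch_ok(u64 flags)
{
	return (flags & PVR_BO_UNDEFINED_MASK) ? -EINVAL : 0;
}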
/*
* All firmware-mapped memory uses (mostly) the same flags. Specifically,
* firmware-mapped memory should be:
* * Read/write on the device,
* * Read/write on the CPU, and
* * Write-combined on the CPU.
*
* The only variation is in caching on the device.
*/
#define PVR_BO_FW_FLAGS_DEVICE_CACHED (ULL(0))
#define PVR_BO_FW_FLAGS_DEVICE_UNCACHED DRM_PVR_BO_BYPASS_DEVICE_CACHE
/**
* struct pvr_gem_object - powervr-specific wrapper for &struct drm_gem_object
*/
struct pvr_gem_object {
	/**
	 * @base: The underlying &struct drm_gem_shmem_object.
	 *
	 * Do not access this member directly, instead call
	 * shmem_gem_from_pvr_gem().
	 */
	struct drm_gem_shmem_object base;

	/**
	 * @flags: Options set at creation-time. Some of these options apply to
	 * the creation operation itself (which are stored here for reference)
	 * with the remainder used for mapping options to both the device and
	 * CPU. These are used every time this object is mapped, and may not be
	 * changed after creation.
	 *
	 * Must be a combination of DRM_PVR_BO_* and/or PVR_BO_* flags.
	 *
	 * .. note::
	 *
	 *    Treat this member as const: none of these options may change or
	 *    be changed throughout the object's lifetime.
	 */
	u64 flags;
};
static_assert(offsetof(struct pvr_gem_object, base) == 0,
"offsetof(struct pvr_gem_object, base) not zero");
#define shmem_gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base)
#define shmem_gem_to_pvr_gem(shmem_obj) container_of_const(shmem_obj, struct pvr_gem_object, base)
#define gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base.base)
#define gem_to_pvr_gem(gem_obj) container_of_const(gem_obj, struct pvr_gem_object, base.base)
/* Functions defined in pvr_gem.c */
struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size);
struct pvr_gem_object *pvr_gem_object_create(struct pvr_device *pvr_dev,
size_t size, u64 flags);
int pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
struct pvr_file *pvr_file, u32 *handle);
struct pvr_gem_object *pvr_gem_object_from_handle(struct pvr_file *pvr_file,
u32 handle);
static __always_inline struct sg_table *
pvr_gem_object_get_pages_sgt(struct pvr_gem_object *pvr_obj)
{
	return drm_gem_shmem_get_pages_sgt(shmem_gem_from_pvr_gem(pvr_obj));
}
void *pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj);
void pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj);
int pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
dma_addr_t *dma_addr_out);
/**
* pvr_gem_object_get() - Acquire reference on pvr_gem_object
* @pvr_obj: Pointer to object to acquire reference on.
*/
static __always_inline void
pvr_gem_object_get(struct pvr_gem_object *pvr_obj)
{
	drm_gem_object_get(gem_from_pvr_gem(pvr_obj));
}
/**
* pvr_gem_object_put() - Release reference on pvr_gem_object
* @pvr_obj: Pointer to object to release reference on.
*/
static __always_inline void
pvr_gem_object_put(struct pvr_gem_object *pvr_obj)
{
	drm_gem_object_put(gem_from_pvr_gem(pvr_obj));
}
static __always_inline size_t
pvr_gem_object_size(struct pvr_gem_object *pvr_obj)
{
	return gem_from_pvr_gem(pvr_obj)->size;
}
#endif /* PVR_GEM_H */
This diff is collapsed (pvr_mmu.c).
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */
#ifndef PVR_MMU_H
#define PVR_MMU_H
#include <linux/memory.h>
#include <linux/types.h>
/* Forward declaration from "pvr_device.h" */
struct pvr_device;
/* Forward declaration from "pvr_mmu.c" */
struct pvr_mmu_context;
struct pvr_mmu_op_context;
/* Forward declaration from "pvr_vm.c" */
struct pvr_vm_context;
/* Forward declaration from <linux/scatterlist.h> */
struct sg_table;
/**
* DOC: Public API (constants)
*
* .. c:macro:: PVR_DEVICE_PAGE_SIZE
*
* Fixed page size referenced by leaf nodes in the page table tree
* structure. In the current implementation, this value is pegged to the
* CPU page size (%PAGE_SIZE). It is therefore an error to specify a CPU
* page size which is not also a supported device page size. The supported
* device page sizes are: 4KiB, 16KiB, 64KiB, 256KiB, 1MiB and 2MiB.
*
* .. c:macro:: PVR_DEVICE_PAGE_SHIFT
*
* Shift value used to efficiently multiply or divide by
* %PVR_DEVICE_PAGE_SIZE.
*
* This value is derived from %PVR_DEVICE_PAGE_SIZE.
*
* .. c:macro:: PVR_DEVICE_PAGE_MASK
*
* Mask used to round a value down to the nearest multiple of
* %PVR_DEVICE_PAGE_SIZE. When bitwise negated and ANDed with a value, a
* zero result indicates that the value is already a multiple of
* %PVR_DEVICE_PAGE_SIZE.
*
* This value is derived from %PVR_DEVICE_PAGE_SIZE.
*/
/* PVR_DEVICE_PAGE_SIZE determines the page size */
#define PVR_DEVICE_PAGE_SIZE (PAGE_SIZE)
#define PVR_DEVICE_PAGE_SHIFT (PAGE_SHIFT)
#define PVR_DEVICE_PAGE_MASK (PAGE_MASK)
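
/*
 * Sketch (helpers assumed, for illustration of the macros above): rounding
 * down uses PVR_DEVICE_PAGE_MASK directly, while the negated mask tests
 * alignment, mirroring the size check in pvr_ioctl_create_bo().
 */
static inline u64 pvr_device_page_align_down_sketch(u64 addr)
{
	return addr & PVR_DEVICE_PAGE_MASK;
}

static inline bool pvr_device_page_aligned_sketch(u64 addr)
{
	return (addr & ~PVR_DEVICE_PAGE_MASK) == 0;
}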
/**
* DOC: Page table index utilities (constants)
*
* .. c:macro:: PVR_PAGE_TABLE_ADDR_SPACE_SIZE
*
* Size of device-virtual address space which can be represented in the page
* table structure.
*
* This value is checked at runtime against
* &pvr_device_features.virtual_address_space_bits by
* pvr_vm_create_context(), which will return an error if the feature value
* does not match this constant.
*
* .. admonition:: Future work
*
* It should be possible to support other values of
* &pvr_device_features.virtual_address_space_bits, but so far no
* hardware has been created which advertises an unsupported value.
*
* .. c:macro:: PVR_PAGE_TABLE_ADDR_BITS
*
* Number of bits needed to represent any value less than
* %PVR_PAGE_TABLE_ADDR_SPACE_SIZE exactly.
*
* .. c:macro:: PVR_PAGE_TABLE_ADDR_MASK
*
* Bitmask of device-virtual addresses which are valid in the page table
* structure.
*
* This value is derived from %PVR_PAGE_TABLE_ADDR_SPACE_SIZE, so the same
* notes on that constant apply here.
*/
#define PVR_PAGE_TABLE_ADDR_SPACE_SIZE SZ_1T
#define PVR_PAGE_TABLE_ADDR_BITS __ffs(PVR_PAGE_TABLE_ADDR_SPACE_SIZE)
#define PVR_PAGE_TABLE_ADDR_MASK (PVR_PAGE_TABLE_ADDR_SPACE_SIZE - 1)
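
/*
 * Sketch (helper assumed): pvr_device_addr_is_valid(), declared in
 * pvr_vm.h, can be expressed in terms of these constants; an address is
 * representable iff no bits above PVR_PAGE_TABLE_ADDR_BITS (40 for the
 * 1 TiB space) are set.
 */
static inline bool pvr_device_addr_representable_sketch(u64 device_addr)
{
	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0;
}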
void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev);
int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait);
struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev);
void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx);
dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx);
void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx);
struct pvr_mmu_op_context *
pvr_mmu_op_context_create(struct pvr_mmu_context *ctx,
struct sg_table *sgt, u64 sgt_offset, u64 size);
int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
u64 device_addr);
int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size);
#endif /* PVR_MMU_H */
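Read together, these declarations imply a map lifecycle roughly as follows; a kernel-side sketch using only the functions above, with the ERR_PTR return convention for pvr_mmu_op_context_create() assumed:

/* Sketch: map a scatter-gather table at device_addr, then flush. */
static int example_mmu_map(struct pvr_device *pvr_dev,
			   struct pvr_mmu_context *ctx, struct sg_table *sgt,
			   u64 size, u64 flags, u64 device_addr)
{
	struct pvr_mmu_op_context *op_ctx;
	int err;

	op_ctx = pvr_mmu_op_context_create(ctx, sgt, 0, size);
	if (IS_ERR(op_ctx))
		return PTR_ERR(op_ctx);

	err = pvr_mmu_map(op_ctx, size, flags, device_addr);

	pvr_mmu_op_context_destroy(op_ctx);

	/* Request a TLB flush and execute it before the next job runs. */
	pvr_mmu_flush_request_all(pvr_dev);
	return err ? err : pvr_mmu_flush_exec(pvr_dev, true);
}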
This diff is collapsed (pvr_vm.c).
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */
#ifndef PVR_VM_H
#define PVR_VM_H
#include "pvr_rogue_mmu_defs.h"
#include <uapi/drm/pvr_drm.h>
#include <linux/types.h>
/* Forward declaration from "pvr_device.h" */
struct pvr_device;
struct pvr_file;
/* Forward declaration from "pvr_gem.h" */
struct pvr_gem_object;
/* Forward declaration from "pvr_vm.c" */
struct pvr_vm_context;
/* Forward declaration from <uapi/drm/pvr_drm.h> */
struct drm_pvr_ioctl_get_heap_info_args;
/* Forward declaration from <drm/drm_exec.h> */
struct drm_exec;
/* Functions defined in pvr_vm.c */
bool pvr_device_addr_is_valid(u64 device_addr);
bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size);
struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
bool is_userspace_context);
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
int pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
struct drm_pvr_ioctl_dev_query_args *args);
int pvr_heap_info_get(const struct pvr_device *pvr_dev,
struct drm_pvr_ioctl_dev_query_args *args);
const struct drm_pvr_heap *pvr_find_heap_containing(struct pvr_device *pvr_dev,
u64 addr, u64 size);
struct pvr_gem_object *pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx,
u64 device_addr,
u64 *mapped_offset_out,
u64 *mapped_size_out);
struct pvr_fw_object *
pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx);
struct pvr_vm_context *pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle);
struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx);
bool pvr_vm_context_put(struct pvr_vm_context *vm_ctx);
void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file);
#endif /* PVR_VM_H */
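Putting the pvr_vm API together: a kernel-side sketch of a transient mapping, assuming pvr_vm_create_context() returns an ERR_PTR on failure, as its use in pvr_device_gpu_init() above suggests:

static int example_vm_usage(struct pvr_device *pvr_dev,
			    struct pvr_gem_object *pvr_obj,
			    u64 device_addr, u64 size)
{
	struct pvr_vm_context *vm_ctx;
	int err;

	vm_ctx = pvr_vm_create_context(pvr_dev, false);
	if (IS_ERR(vm_ctx))
		return PTR_ERR(vm_ctx);

	/* Map the whole object, then immediately unmap it again. */
	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
	if (!err)
		err = pvr_vm_unmap(vm_ctx, device_addr, size);

	pvr_vm_context_put(vm_ctx);
	return err;
}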