Commit 2504c7ec authored by Thomas Zimmermann

drm: Remove source code for non-KMS drivers

Remove all remaining source code for non-KMS drivers. These drivers
were removed in v6.3 and won't come back.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: David Airlie <airlied@gmail.com>
Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231122122449.11588-13-tzimmermann@suse.de
parent 2798ffcc
@@ -47,18 +47,6 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
drm-$(CONFIG_DRM_LEGACY) += \
drm_agpsupport.o \
drm_bufs.o \
drm_context.o \
drm_dma.o \
drm_hashtab.o \
drm_irq.o \
drm_legacy_misc.o \
drm_lock.o \
drm_memory.o \
drm_scatter.o \
drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
/*
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
#if IS_ENABLED(CONFIG_AGP)
/*
* Get AGP information.
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
struct agp_kern_info *kern;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
kern = &dev->agp->agp_info;
info->agp_version_major = kern->version.major;
info->agp_version_minor = kern->version.minor;
info->mode = kern->mode;
info->aperture_base = kern->aper_base;
info->aperture_size = kern->aper_size * 1024 * 1024;
info->memory_allowed = kern->max_memory << PAGE_SHIFT;
info->memory_used = kern->current_memory << PAGE_SHIFT;
info->id_vendor = kern->device->vendor;
info->id_device = kern->device->device;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_info);
int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_info *info = data;
int err;
err = drm_legacy_agp_info(dev, info);
if (err)
return err;
return 0;
}
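/*
 * Usage sketch (illustrative, not from the removed sources): querying the
 * AGP backend through the legacy DRM_IOCTL_AGP_INFO ioctl served above.
 * The device path and function name are assumptions; the ioctl fails with
 * -EINVAL unless AGP has been acquired first (see below).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int example_print_agp_info(const char *path /* e.g. "/dev/dri/card0" */)
{
	struct drm_agp_info info = {0};
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, DRM_IOCTL_AGP_INFO, &info) == 0)
		printf("AGP %d.%d, aperture %lu MiB, vendor %04hx device %04hx\n",
		       info.agp_version_major, info.agp_version_minor,
		       info.aperture_size >> 20, info.id_vendor, info.id_device);
	close(fd);
	return 0;
}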
/*
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_legacy_agp_acquire(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
dev->agp->bridge = agp_backend_acquire(pdev);
if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_acquire);
/*
* Acquire the AGP device (ioctl).
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev);
}
/*
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
int drm_legacy_agp_release(struct drm_device *dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
agp_backend_release(dev->agp->bridge);
dev->agp->acquired = 0;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_release);
int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_legacy_agp_release(dev);
}
/*
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
* \param mode Requested AGP mode.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
dev->agp->enabled = 1;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_enable);
int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_mode *mode = data;
return drm_legacy_agp_enable(dev, *mode);
}
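/*
 * Usage sketch (illustrative, not from the removed sources): the ordering
 * enforced above -- AGP must be acquired before it can be enabled. 'fd' is
 * an already-open, master DRM file descriptor; needs <sys/ioctl.h> and
 * <drm/drm.h>.
 */
static int example_agp_bringup(int fd, unsigned long agp_mode)
{
	struct drm_agp_mode mode = { .mode = agp_mode };

	if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE))		/* -> drm_legacy_agp_acquire() */
		return -1;
	if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode))	/* -> drm_legacy_agp_enable() */
		return -1;
	return 0;
}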
/*
* Allocate AGP memory.
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
* memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
struct agp_memory *memory;
unsigned long pages;
u32 type;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
type = (u32) request->type;
memory = agp_allocate_memory(dev->agp->bridge, pages, type);
if (!memory) {
kfree(entry);
return -ENOMEM;
}
entry->handle = (unsigned long)memory->key + 1;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_alloc);
int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
return drm_legacy_agp_alloc(dev, request);
}
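/*
 * Usage sketch (illustrative, not from the removed sources): allocating AGP
 * memory through the ioctl above; the kernel fills in the handle
 * (memory->key + 1) and the physical address. Needs <sys/ioctl.h> and
 * <drm/drm.h>; 'fd' is an open, AGP-acquired DRM file descriptor.
 */
static int example_agp_alloc(int fd, unsigned long size,
			     struct drm_agp_buffer *out)
{
	struct drm_agp_buffer req = { .size = size, .type = 0 };

	if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &req))	/* -> drm_legacy_agp_alloc() */
		return -1;
	*out = req;					/* .handle and .physical are now valid */
	return 0;
}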
/*
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
* \param handle AGP memory handle.
* \return pointer to the drm_agp_mem structure associated with \p handle.
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
unsigned long handle)
{
struct drm_agp_mem *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
return NULL;
}
/*
* Unbind AGP memory from the GATT (ioctl).
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry || !entry->bound)
return -EINVAL;
ret = agp_unbind_memory(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
}
EXPORT_SYMBOL(drm_legacy_agp_unbind);
int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
return drm_legacy_agp_unbind(dev, request);
}
/*
* Bind AGP memory into the GATT (ioctl)
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and that no memory
* is currently bound into the GATT. Looks up the AGP memory entry and passes
* it to bind_agp() function.
*/
int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry || entry->bound)
return -EINVAL;
page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
retcode = agp_bind_memory(entry->memory, page);
if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
dev->agp->base, entry->bound);
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_bind);
int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
return drm_legacy_agp_bind(dev, request);
}
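/*
 * Usage sketch (illustrative, not from the removed sources): binding a
 * previously allocated AGP buffer at a byte offset inside the aperture;
 * drm_legacy_agp_bind() above rounds the offset up to whole pages.
 * Needs <sys/ioctl.h> and <drm/drm.h>.
 */
static int example_agp_bind(int fd, unsigned long handle,
			    unsigned long aperture_offset)
{
	struct drm_agp_binding bind = {
		.handle = handle,		/* from DRM_IOCTL_AGP_ALLOC */
		.offset = aperture_offset,	/* bytes from the aperture base */
	};

	return ioctl(fd, DRM_IOCTL_AGP_BIND, &bind);	/* -> drm_legacy_agp_bind() */
}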
/*
* Free AGP memory (ioctl).
*
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
* AGP memory entry. If the memory is currently bound, unbind it via
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry)
return -EINVAL;
if (entry->bound)
agp_unbind_memory(entry->memory);
list_del(&entry->head);
agp_free_memory(entry->memory);
kfree(entry);
return 0;
}
EXPORT_SYMBOL(drm_legacy_agp_free);
int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
return drm_legacy_agp_free(dev, request);
}
/*
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
*
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*
* Note that final cleanup of the kmalloced structure is directly done in
* drm_pci_agp_destroy.
*/
struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_agp_head *head = NULL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return NULL;
head->bridge = agp_find_bridge(pdev);
if (!head->bridge) {
head->bridge = agp_backend_acquire(pdev);
if (!head->bridge) {
kfree(head);
return NULL;
}
agp_copy_info(head->bridge, &head->agp_info);
agp_backend_release(head->bridge);
} else {
agp_copy_info(head->bridge, &head->agp_info);
}
if (head->agp_info.chipset == NOT_SUPPORTED) {
kfree(head);
return NULL;
}
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
head->base = head->agp_info.aper_base;
return head;
}
/* Only exported for i810.ko */
EXPORT_SYMBOL(drm_legacy_agp_init);
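/*
 * Kernel-side sketch (illustrative, not from the removed sources): how a
 * legacy, AGP-capable driver would typically set up dev->agp during load
 * before using the helpers above. Whether missing AGP is fatal is up to
 * the driver; treating it as fatal here is an assumption.
 */
static int example_legacy_driver_load_agp(struct drm_device *dev)
{
	/* Builds dev->agp from the agpgart backend found for this PCI device. */
	dev->agp = drm_legacy_agp_init(dev);
	if (!dev->agp)
		return -ENODEV;
	return 0;
}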
/**
* drm_legacy_agp_clear - Clear AGP resource list
* @dev: DRM device
*
* Iterate over all AGP resources and remove them. But keep the AGP head
* intact so it can still be used. It is safe to call this if AGP is disabled or
* was already removed.
*
* Cleanup is only done for drivers who have DRIVER_LEGACY set.
*/
void drm_legacy_agp_clear(struct drm_device *dev)
{
struct drm_agp_mem *entry, *tempe;
if (!dev->agp)
return;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
agp_unbind_memory(entry->memory);
agp_free_memory(entry->memory);
kfree(entry);
}
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_legacy_agp_release(dev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
#endif
/*
* Legacy: Generic DRM Buffer Management
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
* Author: Gareth Hughes <gareth@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/shmparam.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
/*
* Because the kernel-userspace ABI is fixed at a 32-bit offset
* while PCI resources may live above that, we only compare the
* lower 32 bits of the map offset for maps of type
* _DRM_FRAMEBUFFER or _DRM_REGISTERS.
* It is assumed that if a driver has more than one resource
* of each type, the lower 32 bits are different.
*/
if (!entry->map ||
map->type != entry->map->type ||
entry->master != dev->master)
continue;
switch (map->type) {
case _DRM_SHM:
if (map->flags != _DRM_CONTAINS_LOCK)
break;
return entry;
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
if ((entry->map->offset & 0xffffffff) ==
(map->offset & 0xffffffff))
return entry;
break;
default: /* Make gcc happy */
break;
}
if (entry->map->offset == map->offset)
return entry;
}
return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
unsigned long user_token, int hashed_handle, int shm)
{
int use_hashed_handle, shift;
unsigned long add;
#if (BITS_PER_LONG == 64)
use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif
if (!use_hashed_handle) {
int ret;
hash->key = user_token >> PAGE_SHIFT;
ret = drm_ht_insert_item(&dev->map_hash, hash);
if (ret != -EINVAL)
return ret;
}
shift = 0;
add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
if (shm && (SHMLBA > PAGE_SIZE)) {
int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
/* For shared memory, we have to preserve the SHMLBA
* bits of the eventual vma->vm_pgoff value during
* mmap(). Otherwise we run into cache aliasing problems
* on some platforms. On these platforms, the pgoff of
* a mmap() request is used to pick a suitable virtual
* address for the mmap() region such that it will not
* cause cache aliasing problems.
*
* Therefore, make sure the SHMLBA relevant bits of the
* hash value we use are equal to those in the original
* kernel virtual address.
*/
shift = bits;
add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
}
return drm_ht_just_insert_please(&dev->map_hash, hash,
user_token, 32 - PAGE_SHIFT - 3,
shift, add);
}
/*
* Core function to create a range of memory available for mapping by a
* non-root process.
*
* Adjusts the memory offset to its absolute value according to the mapping
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags,
struct drm_map_list **maplist)
{
struct drm_local_map *map;
struct drm_map_list *list;
unsigned long user_token;
int ret;
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
map->offset = offset;
map->size = size;
map->flags = flags;
map->type = type;
/* Only allow shared memory to be removable since we only keep enough
* book keeping information about shared memory to allow for removal
* when processes fork.
*/
if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
kfree(map);
return -EINVAL;
}
DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
(unsigned long long)map->offset, map->size, map->type);
/* page-align _DRM_SHM maps. They are allocated here so there is no security
* hole created by that and it works around various broken drivers that use
* a non-aligned quantity to map the SAREA. --BenH
*/
if (map->type == _DRM_SHM)
map->size = PAGE_ALIGN(map->size);
if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
kfree(map);
return -EINVAL;
}
map->mtrr = -1;
map->handle = NULL;
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
if (map->offset + (map->size-1) < map->offset ||
map->offset < virt_to_phys(high_memory)) {
kfree(map);
return -EINVAL;
}
#endif
/* Some drivers preinitialize some maps, without the X Server
* needing to be aware of it. Therefore, we just return success
* when the server tries to create a duplicate map.
*/
list = drm_find_matching_map(dev, map);
if (list != NULL) {
if (list->map->size != map->size) {
DRM_DEBUG("Matching maps of type %d with "
"mismatched sizes, (%ld vs %ld)\n",
map->type, map->size,
list->map->size);
list->map->size = map->size;
}
kfree(map);
*maplist = list;
return 0;
}
if (map->type == _DRM_FRAME_BUFFER ||
(map->flags & _DRM_WRITE_COMBINING)) {
map->mtrr =
arch_phys_wc_add(map->offset, map->size);
}
if (map->type == _DRM_REGISTERS) {
if (map->flags & _DRM_WRITE_COMBINING)
map->handle = ioremap_wc(map->offset,
map->size);
else
map->handle = ioremap(map->offset, map->size);
if (!map->handle) {
kfree(map);
return -ENOMEM;
}
}
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
if (list != NULL) {
if (list->map->size != map->size) {
DRM_DEBUG("Matching maps of type %d with "
"mismatched sizes, (%ld vs %ld)\n",
map->type, map->size, list->map->size);
list->map->size = map->size;
}
kfree(map);
*maplist = list;
return 0;
}
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
map->size, order_base_2(map->size), map->handle);
if (!map->handle) {
kfree(map);
return -ENOMEM;
}
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
if (dev->master->lock.hw_lock != NULL) {
vfree(map->handle);
kfree(map);
return -EBUSY;
}
dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
struct drm_agp_mem *entry;
int valid = 0;
if (!dev->agp) {
kfree(map);
return -EINVAL;
}
#ifdef __alpha__
map->offset += dev->hose->mem_space->start;
#endif
/* In some cases (i810 driver), user space may have already
* added the AGP base itself, because dev->agp->base previously
* only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space.
* It's not always the case as AGP can be in the control
* of user space (i.e. i810 driver). So this loop will get
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (!list_empty(&dev->agp->memory) && !valid) {
kfree(map);
return -EPERM;
}
DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
(unsigned long long)map->offset, map->size);
break;
}
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
kfree(map);
return -EINVAL;
}
map->offset += (unsigned long)dev->sg->virtual;
break;
case _DRM_CONSISTENT:
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first.
*/
map->handle = dma_alloc_coherent(dev->dev,
map->size,
&map->offset,
GFP_KERNEL);
if (!map->handle) {
kfree(map);
return -ENOMEM;
}
break;
default:
kfree(map);
return -EINVAL;
}
list = kzalloc(sizeof(*list), GFP_KERNEL);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
return -EINVAL;
}
list->map = map;
mutex_lock(&dev->struct_mutex);
list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
/* We do it here so that dev->struct_mutex protects the increment */
user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
map->offset;
ret = drm_map_handle(dev, &list->hash, user_token, 0,
(map->type == _DRM_SHM));
if (ret) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
kfree(list);
mutex_unlock(&dev->struct_mutex);
return ret;
}
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
if (!(map->flags & _DRM_DRIVER))
list->master = dev->master;
*maplist = list;
return 0;
}
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
struct drm_map_list *list;
int rc;
rc = drm_addmap_core(dev, offset, size, type, flags, &list);
if (!rc)
*map_ptr = list->map;
return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
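/*
 * Kernel-side sketch (illustrative, not from the removed sources): a legacy
 * driver publishing its register BAR via drm_legacy_addmap(). BAR index 0
 * and the flags are assumptions; the helper ioremaps the range and puts it
 * on dev->maplist so userspace can look it up and mmap() it.
 */
static int example_legacy_map_registers(struct drm_device *dev,
					struct drm_local_map **regs)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	return drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
				 (unsigned int)pci_resource_len(pdev, 0),
				 _DRM_REGISTERS, _DRM_READ_ONLY, regs);
}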
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
unsigned int token)
{
struct drm_map_list *_entry;
list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);
/*
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_map structure.
* \return zero on success or a negative value on error.
*
*/
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
struct drm_map_list *maplist;
int err;
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
map->flags, &maplist);
if (err)
return err;
/* avoid a warning on 64-bit; this casting isn't very nice, but the API is set, so it's too late to change it */
map->handle = (void *)(unsigned long)maplist->user_token;
/*
* It appears that there are no users of this value whatsoever --
* drmAddMap just discards it. Let's not encourage its use.
* (Keeping drm_addmap_core's returned mtrr value would be wrong --
* it's not a real mtrr index anymore.)
*/
map->mtrr = -1;
return 0;
}
/*
* Get a mapping information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_map structure.
*
* \return zero on success or a negative number on failure.
*
* Searches for the mapping with the specified offset and copies its information
* into userspace
*/
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
struct drm_map_list *r_list = NULL;
struct list_head *list;
int idx;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
idx = map->offset;
if (idx < 0)
return -EINVAL;
i = 0;
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
}
if (!r_list || !r_list->map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
map->offset = r_list->map->offset;
map->size = r_list->map->size;
map->type = r_list->map->type;
map->flags = r_list->map->flags;
map->handle = (void *)(unsigned long) r_list->user_token;
map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
mutex_unlock(&dev->struct_mutex);
return 0;
}
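/*
 * Usage sketch (illustrative, not from the removed sources): walking the map
 * list with DRM_IOCTL_GET_MAP. As the code above shows, the ioctl treats
 * drm_map.offset as a list index on input and fills in the real map data on
 * output. Needs <stdio.h>, <sys/ioctl.h> and <drm/drm.h>.
 */
static void example_dump_maps(int fd)
{
	int i;

	for (i = 0; ; i++) {
		struct drm_map map = { .offset = i };	/* index, not an address */

		if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
			break;				/* -EINVAL past the end of the list */
		printf("map %d: type %d offset 0x%lx size %lu\n",
		       i, map.type, map.offset, map.size);
	}
}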
/*
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
* Searches for the map on drm_device::maplist, removes it from the list, checks
* whether it is still in use, and frees any associated resources (such as MTRRs)
* if it is not.
*
* \sa drm_legacy_addmap
*/
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
int found = 0;
struct drm_master *master;
/* Find the list entry for the map and remove it */
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
master = r_list->master;
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
kfree(r_list);
found = 1;
break;
}
}
if (!found)
return -EINVAL;
switch (map->type) {
case _DRM_REGISTERS:
iounmap(map->handle);
fallthrough;
case _DRM_FRAME_BUFFER:
arch_phys_wc_del(map->mtrr);
break;
case _DRM_SHM:
vfree(map->handle);
if (master) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL; /* SHM removed */
master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue);
}
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
break;
}
kfree(map);
return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);
void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
struct drm_map_list *r_list, *list_temp;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master) {
drm_legacy_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
mutex_unlock(&dev->struct_mutex);
}
void drm_legacy_rmmaps(struct drm_device *dev)
{
struct drm_map_list *r_list, *list_temp;
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_legacy_rmmap(dev, r_list->map);
}
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
* exit uncleanly. Therefore, having userland manually remove mappings seems
* like a pointless exercise since they're going away anyway.
*
* One use case might be after addmap is allowed for normal users for SHM and
* gets used by drivers that the server doesn't need to care about. This seems
* unlikely.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*/
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *request = data;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list;
int ret;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request->handle &&
r_list->map->flags & _DRM_REMOVABLE) {
map = r_list->map;
break;
}
}
/* List has wrapped around to the head pointer, or it's empty and we didn't
* find anything.
*/
if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
/* Register and framebuffer maps are permanent */
if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
ret = drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Cleanup after an error on one of the addbufs() functions.
*
* \param dev DRM device.
* \param entry buffer entry where the error occurred.
*
* Frees any pages and buffers associated with the given entry.
*/
static void drm_cleanup_buf_error(struct drm_device *dev,
struct drm_buf_entry *entry)
{
drm_dma_handle_t *dmah;
int i;
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
if (entry->seglist[i]) {
dmah = entry->seglist[i];
dma_free_coherent(dev->dev,
dmah->size,
dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
}
kfree(entry->seglist);
entry->seg_count = 0;
}
if (entry->buf_count) {
for (i = 0; i < entry->buf_count; i++) {
kfree(entry->buflist[i].dev_private);
}
kfree(entry->buflist);
entry->buf_count = 0;
}
}
#if IS_ENABLED(CONFIG_AGP)
/*
* Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
* \param request pointer to a struct drm_buf_desc describing the request.
* \return zero on success or a negative number on failure.
*
* After some sanity checks creates a drm_buf structure for each buffer and
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
int drm_legacy_addbufs_agp(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
struct drm_agp_mem *agp_entry;
struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i, valid;
struct drm_buf **temp_buflist;
if (!dma)
return -EINVAL;
count = request->count;
order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request->agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %lx\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = krealloc(dma->buflist,
(dma->buf_count + entry->buf_count) *
sizeof(*dma->buflist), GFP_KERNEL);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_AGP;
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
int drm_legacy_addbufs_pci(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
int count;
int order;
int size;
int total;
int page_order;
struct drm_buf_entry *entry;
drm_dma_handle_t *dmah;
struct drm_buf *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
unsigned long *temp_pagelist;
struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = order_base_2(request->size);
size = 1 << order;
DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
request->count, request->size, size, order);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
if (!entry->seglist) {
kfree(entry->buflist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
/* Keep the original pagelist until we know all the allocations
* have succeeded
*/
temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
sizeof(*dma->pagelist),
GFP_KERNEL);
if (!temp_pagelist) {
kfree(entry->buflist);
kfree(entry->seglist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memcpy(temp_pagelist,
dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while (entry->buf_count < count) {
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dmah->size = total;
dmah->vaddr = dma_alloc_coherent(dev->dev,
dmah->size,
&dmah->busaddr,
GFP_KERNEL);
if (!dmah->vaddr) {
kfree(dmah);
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->seglist[entry->seg_count++] = dmah;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
(unsigned long)dmah->vaddr + PAGE_SIZE * i);
temp_pagelist[dma->page_count + page_count++]
= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(dmah->vaddr + offset);
buf->bus_address = dmah->busaddr + offset;
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = kzalloc(buf->dev_priv_size,
GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
temp_buflist = krealloc(dma->buflist,
(dma->buf_count + entry->buf_count) *
sizeof(*dma->buflist), GFP_KERNEL);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
kfree(temp_pagelist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
/* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
kfree(dma->pagelist);
}
dma->pagelist = temp_pagelist;
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
if (request->flags & _DRM_PCI_BUFFER_RO)
dma->flags = _DRM_DMA_USE_PCI_RO;
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
static int drm_legacy_addbufs_sg(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request->agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %lu\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset
+ (unsigned long)dev->sg->virtual);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = krealloc(dma->buflist,
(dma->buf_count + entry->buf_count) *
sizeof(*dma->buflist), GFP_KERNEL);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_SG;
atomic_dec(&dev->buf_alloc);
return 0;
}
/*
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a struct drm_buf_desc request.
* \return zero on success or a negative number on failure.
*
* Depending on the memory type specified in drm_buf_desc::flags and the
* build options, this dispatches the call either to addbufs_agp(),
* addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
* PCI memory respectively.
*/
int drm_legacy_addbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_desc *request = data;
int ret;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AGP)
if (request->flags & _DRM_AGP_BUFFER)
ret = drm_legacy_addbufs_agp(dev, request);
else
#endif
if (request->flags & _DRM_SG_BUFFER)
ret = drm_legacy_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
ret = -EINVAL;
else
ret = drm_legacy_addbufs_pci(dev, request);
return ret;
}
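/*
 * Usage sketch (illustrative, not from the removed sources): requesting DMA
 * buffers through DRM_IOCTL_ADD_BUFS. The flags select which path above
 * (AGP, SG or PCI) services the request; the values here are illustrative.
 * Needs <sys/ioctl.h> and <drm/drm.h>.
 */
static int example_add_agp_bufs(int fd, int count, int size,
				unsigned long agp_start)
{
	struct drm_buf_desc req = {
		.count     = count,		/* e.g. 64 */
		.size      = size,		/* bytes; rounded up to a power of two */
		.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
		.agp_start = agp_start,		/* byte offset into the AGP aperture */
	};

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req))
		return -1;
	return req.count;			/* number of buffers actually created */
}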
/*
* Get information about the buffer mappings.
*
* This was originally meant for debugging purposes, or for use by a sophisticated
* client library to determine how best to use the available buffers (e.g.,
* large buffers can be used for image transfer).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_info structure.
* \return zero on success or a negative number on failure.
*
* Increments drm_device::buf_use while holding the drm_device::buf_lock
* lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
int __drm_legacy_infobufs(struct drm_device *dev,
void *data, int *p,
int (*f)(void *, int, struct drm_buf_entry *))
{
struct drm_device_dma *dma = dev->dma;
int i;
int count;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->buf_lock);
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
++count;
}
DRM_DEBUG("count = %d\n", count);
if (*p >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
struct drm_buf_entry *from = &dma->bufs[i];
if (from->buf_count) {
if (f(data, count, from) < 0)
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].low_mark,
dma->bufs[i].high_mark);
++count;
}
}
}
*p = count;
return 0;
}
static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
struct drm_buf_info *request = data;
struct drm_buf_desc __user *to = &request->list[count];
struct drm_buf_desc v = {.count = from->buf_count,
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
return -EFAULT;
return 0;
}
int drm_legacy_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_info *request = data;
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
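/*
 * Usage sketch (illustrative, not from the removed sources): the usual
 * two-call pattern for DRM_IOCTL_INFO_BUFS -- first call with count 0 to
 * learn how many size pools exist, then again with a large-enough array.
 * Needs <stdlib.h>, <sys/ioctl.h> and <drm/drm.h>.
 */
static struct drm_buf_desc *example_info_bufs(int fd, int *nr)
{
	struct drm_buf_info info = { .count = 0, .list = NULL };
	struct drm_buf_desc *list;

	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) || info.count <= 0)
		return NULL;			/* info.count now holds the pool count */
	list = calloc(info.count, sizeof(*list));
	if (!list)
		return NULL;
	info.list = list;
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
		free(list);
		return NULL;
	}
	*nr = info.count;
	return list;
}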
/*
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg a pointer to a drm_buf_desc structure.
* \return zero on success or a negative number on failure.
*
* Verifies that the size order is bounded between the admissible orders and
* updates the respective drm_device_dma::bufs entry low and high water mark.
*
* \note This ioctl is deprecated and mostly never used.
*/
int drm_legacy_markbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_desc *request = data;
int order;
struct drm_buf_entry *entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
order = order_base_2(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
if (request->low_mark < 0 || request->low_mark > entry->buf_count)
return -EINVAL;
if (request->high_mark < 0 || request->high_mark > entry->buf_count)
return -EINVAL;
entry->low_mark = request->low_mark;
entry->high_mark = request->high_mark;
return 0;
}
/*
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_free structure.
* \return zero on success or a negative number on failure.
*
* Calls free_buffer() for each used buffer.
* This function is primarily used for debugging.
*/
int drm_legacy_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_free *request = data;
int i;
int idx;
struct drm_buf *buf;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
DRM_DEBUG("%d\n", request->count);
for (i = 0; i < request->count; i++) {
if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return -EINVAL;
}
idx = array_index_nospec(idx, dma->buf_count);
buf = dma->buflist[idx];
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
task_pid_nr(current));
return -EINVAL;
}
drm_legacy_free_buffer(dev, buf);
}
return 0;
}
/*
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
*
* Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
* about each buffer into user space. For PCI buffers, it calls vm_mmap() with
* offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
void __user **v,
int (*f)(void *, int, unsigned long,
struct drm_buf *),
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
unsigned long virtual;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->buf_lock);
if (*p >= dma->buf_count) {
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
if (!map) {
retcode = -EINVAL;
goto done;
}
virtual = vm_mmap(file_priv->filp, 0, map->size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
token);
} else {
virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
}
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
*v = (void __user *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (f(data, i, virtual, dma->buflist[i]) < 0) {
retcode = -EFAULT;
goto done;
}
}
}
done:
*p = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
return retcode;
}
static int map_one_buf(void *data, int idx, unsigned long virtual,
struct drm_buf *buf)
{
struct drm_buf_map *request = data;
unsigned long address = virtual + buf->offset; /* *** */
if (copy_to_user(&request->list[idx].idx, &buf->idx,
sizeof(request->list[0].idx)))
return -EFAULT;
if (copy_to_user(&request->list[idx].total, &buf->total,
sizeof(request->list[0].total)))
return -EFAULT;
if (clear_user(&request->list[idx].used, sizeof(int)))
return -EFAULT;
if (copy_to_user(&request->list[idx].address, &address,
sizeof(address)))
return -EFAULT;
return 0;
}
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_map *request = data;
return __drm_legacy_mapbufs(dev, data, &request->count,
&request->virtual, map_one_buf,
file_priv);
}
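/*
 * Usage sketch (illustrative, not from the removed sources): mapping all DMA
 * buffers into the client with DRM_IOCTL_MAP_BUFS. 'count' must be at least
 * the total number of buffers; each drm_buf_pub entry then carries the
 * buffer's index, size and client-virtual address computed above.
 * Needs <stdlib.h>, <sys/ioctl.h> and <drm/drm.h>.
 */
static struct drm_buf_pub *example_map_bufs(int fd, int count)
{
	struct drm_buf_map req = {
		.count = count,
		.list  = calloc(count, sizeof(struct drm_buf_pub)),
	};

	if (!req.list)
		return NULL;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req)) {
		free(req.list);
		return NULL;
	}
	/* req.virtual is the base of the mmap()ed region. */
	return req.list;
}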
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (dev->driver->dma_ioctl)
return dev->driver->dma_ioctl(dev, data, file_priv);
else
return -EINVAL;
}
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;
}
}
return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
/*
* Legacy: Generic DRM Contexts
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
* Author: Gareth Hughes <gareth@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
struct drm_ctx_list {
struct list_head head;
drm_context_t handle;
struct drm_file *tag;
};
/******************************************************************/
/** \name Context bitmap support */
/*@{*/
/*
* Free a handle from the context bitmap.
*
* \param dev DRM device.
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
/*
* Context bitmap allocation.
*
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
{
int ret;
mutex_lock(&dev->struct_mutex);
ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
GFP_KERNEL);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Context bitmap initialization.
*
* \param dev DRM device.
*
* Initialise the drm_device::ctx_idr
*/
void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
idr_init(&dev->ctx_idr);
}
/*
* Context bitmap cleanup.
*
* \param dev DRM device.
*
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
/**
* drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
* @dev: DRM device to operate on
* @file: Open file to flush contexts for
*
* This iterates over all contexts on @dev and drops them if they're owned by
* @file. Note that after this call returns, new contexts might be added if
* the file is still alive.
*/
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
struct drm_ctx_list *pos, *tmp;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->ctxlist_mutex);
list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
if (pos->tag == file &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, pos->handle);
drm_legacy_ctxbitmap_free(dev, pos->handle);
list_del(&pos->head);
kfree(pos);
}
}
mutex_unlock(&dev->ctxlist_mutex);
}
/*@}*/
/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/
/*
* Get per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
 * Looks up the map stored in drm_device::ctx_idr for the given context handle
 * and returns the map's user-space token in the request.
*/
int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map;
struct drm_map_list *_entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
if (!map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
}
mutex_unlock(&dev->struct_mutex);
if (request->handle == NULL)
return -EINVAL;
return 0;
}
/*
* Set per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
 * Searches for the mapping specified in \p arg and updates the entry in
* drm_device::ctx_idr with it.
*/
int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request->handle)
goto found;
}
bad:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
found:
map = r_list->map;
if (!map)
goto bad;
if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
goto bad;
mutex_unlock(&dev->struct_mutex);
return 0;
}
/*@}*/
/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/
/*
* Switch context.
*
* \param dev DRM device.
* \param old old context handle.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Attempt to set drm_device::context_flag.
*/
static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
return 0;
}
/*
* Complete context switch.
*
* \param dev DRM device.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
 * Updates drm_device::last_context, verifies that the hardware lock is held
 * and clears the drm_device::context_flag.
*/
static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here.
*/
clear_bit(0, &dev->context_flag);
return 0;
}
/*
* Reserve contexts.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
int drm_legacy_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
return -EFAULT;
}
}
res->count = DRM_RESERVED_CONTEXTS;
return 0;
}
/*
* Add context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Get a new handle for the context and copy to userspace.
*/
int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
int tmp_handle;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
tmp_handle = drm_legacy_ctxbitmap_next(dev);
if (tmp_handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
tmp_handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", tmp_handle);
if (tmp_handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return tmp_handle;
}
ctx->handle = tmp_handle;
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ctx_entry->head);
ctx_entry->handle = ctx->handle;
ctx_entry->tag = file_priv;
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
mutex_unlock(&dev->ctxlist_mutex);
return 0;
}
/*
* Get context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
int drm_legacy_getctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
return 0;
}
/*
* Switch context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch().
*/
int drm_legacy_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
/*
* New context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch_complete().
*/
int drm_legacy_newctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
/*
* Remove context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
int drm_legacy_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
drm_legacy_ctxbitmap_free(dev, ctx->handle);
}
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist)) {
struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx->handle) {
list_del(&pos->head);
kfree(pos);
}
}
}
mutex_unlock(&dev->ctxlist_mutex);
return 0;
}
/*@}*/
/*
* \file drm_dma.c
* DMA IOCTL and function support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
/**
 * drm_legacy_dma_setup() - Initialize the DMA data.
 * @dev: DRM device.
 *
 * Allocate and initialize a drm_device_dma structure.
 *
 * Return: zero on success or a negative value on failure.
 */
int drm_legacy_dma_setup(struct drm_device *dev)
{
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
return 0;
}
/**
 * drm_legacy_dma_takedown() - Cleanup the DMA resources.
 * @dev: DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 */
void drm_legacy_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
drm_dma_handle_t *dmah;
int i, j;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
dmah = dma->bufs[i].seglist[j];
dma_free_coherent(dev->dev,
dmah->size,
dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
}
kfree(dma->bufs[i].seglist);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
kfree(dma->bufs[i].buflist[j].dev_private);
}
kfree(dma->bufs[i].buflist);
}
}
kfree(dma->buflist);
kfree(dma->pagelist);
kfree(dev->dma);
dev->dma = NULL;
}
/**
 * drm_legacy_free_buffer() - Free a buffer.
 * @dev: DRM device.
 * @buf: buffer to free.
 *
 * Resets the fields of @buf.
 */
void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
}
/**
 * drm_legacy_reclaim_buffers() - Reclaim the buffers.
 * @dev: DRM device.
 * @file_priv: DRM file private.
 *
 * Frees each buffer associated with @file_priv not already on the hardware.
 */
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_legacy_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/hash.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
ht->order = order;
ht->table = NULL;
if (size <= PAGE_SIZE / sizeof(*ht->table))
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
else
ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
return 0;
}
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry_rcu(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
parent = &entry->head;
}
if (parent) {
hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
/*
 * Insert an item and assign it a key, within the given "bits"-bit key space,
 * that has not been used before.
 */
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
int ret;
unsigned long mask = (1UL << bits) - 1;
unsigned long first, unshifted_key;
unshifted_key = hash_long(seed, bits);
first = unshifted_key;
do {
item->key = (unshifted_key << shift) + add;
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
	} while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
return -EINVAL;
}
return 0;
}
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
{
struct hlist_node *list;
list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
}
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init_rcu(&item->head);
return 0;
}
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
kvfree(ht->table);
ht->table = NULL;
}
}
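/*
 * Illustrative usage sketch (not part of the original drm_hashtab.c): shows
 * how a caller embeds a struct drm_hash_item in its own object, inserts it
 * keyed by a handle and recovers the object again with container_of(). The
 * struct example_node, the key value and example_hashtab_usage() are
 * hypothetical.
 */
struct example_node {
	struct drm_hash_item hash;	/* keyed by the object's handle */
	void *payload;
};

static int example_hashtab_usage(void)
{
	struct drm_open_hash ht;
	struct example_node node = { .hash.key = 0x1234 };
	struct drm_hash_item *found;
	int ret;

	ret = drm_ht_create(&ht, 12);	/* 2^12 hash buckets */
	if (ret)
		return ret;

	ret = drm_ht_insert_item(&ht, &node.hash);
	if (!ret && drm_ht_find_item(&ht, 0x1234, &found) == 0) {
		struct example_node *n =
			container_of(found, struct example_node, hash);

		/* n points back at &node here */
		drm_ht_remove_item(&ht, &n->hash);
	}

	drm_ht_remove(&ht);
	return ret;
}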
......@@ -121,11 +121,6 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
/* drm_irq.c */
/* IOCTLS */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
......
/*
* drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/interrupt.h> /* For task queue support */
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_legacy.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "drm_internal.h"
static int drm_legacy_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
if (irq == 0)
return -EINVAL;
if (dev->irq_enabled)
return -EBUSY;
dev->irq_enabled = true;
DRM_DEBUG("irq=%d\n", irq);
/* Before installing handler */
if (dev->driver->irq_preinstall)
dev->driver->irq_preinstall(dev);
/* PCI devices require shared interrupts. */
if (dev_is_pci(dev->dev))
sh_flags = IRQF_SHARED;
ret = request_irq(irq, dev->driver->irq_handler,
sh_flags, dev->driver->name, dev);
if (ret < 0) {
dev->irq_enabled = false;
return ret;
}
/* After installing handler */
if (dev->driver->irq_postinstall)
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
dev->irq_enabled = false;
if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_unregister(to_pci_dev(dev->dev));
free_irq(irq, dev);
} else {
dev->irq = irq;
}
return ret;
}
int drm_legacy_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
bool irq_enabled;
int i;
irq_enabled = dev->irq_enabled;
dev->irq_enabled = false;
/*
* Wake up any waiters so they don't hang. This is just to paper over
* issues for UMS drivers which aren't in full control of their
* vblank/irq handling. KMS drivers must ensure that vblanks are all
* disabled when uninstalling the irq handler.
*/
if (drm_dev_has_vblank(dev)) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
if (!vblank->enabled)
continue;
WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
drm_vblank_disable_and_save(dev, i);
wake_up(&vblank->queue);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_unregister(to_pci_dev(dev->dev));
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
free_irq(dev->irq, dev);
return 0;
}
EXPORT_SYMBOL(drm_legacy_irq_uninstall);
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
struct pci_dev *pdev;
	/* If we don't have an IRQ, fall back for compatibility reasons -
	 * this used to be a separate function in drm_dma.h.
	 */
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
/* UMS was only ever supported on pci devices. */
if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
pdev = to_pci_dev(dev->dev);
irq = pdev->irq;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != irq)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
ret = drm_legacy_irq_install(dev, irq);
mutex_unlock(&dev->struct_mutex);
return ret;
case DRM_UNINST_HANDLER:
mutex_lock(&dev->struct_mutex);
ret = drm_legacy_irq_uninstall(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
default:
return -EINVAL;
}
}
#ifndef __DRM_LEGACY_H__
#define __DRM_LEGACY_H__
/*
* Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
 * This file contains legacy interfaces that modern drm drivers
 * should no longer be using. They cannot be removed while legacy
 * drivers still use them, since removing them would break the API.
*/
#include <linux/list.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_legacy.h>
struct agp_memory;
struct drm_buf_desc;
struct drm_device;
struct drm_file;
struct drm_hash_item;
struct drm_open_hash;
/*
* Hash-table Support
*/
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
/* drm_hashtab.c */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add);
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
void drm_ht_remove(struct drm_open_hash *ht);
#endif
/*
* RCU-safe interface
*
* The user of this API needs to make sure that two or more instances of the
* hash table manipulation functions are never run simultaneously.
* The lookup function drm_ht_find_item_rcu may, however, run simultaneously
* with any of the manipulation functions as long as it's called from within
* an RCU read-locked section.
*/
#define drm_ht_insert_item_rcu drm_ht_insert_item
#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
#define drm_ht_remove_key_rcu drm_ht_remove_key
#define drm_ht_remove_item_rcu drm_ht_remove_item
#define drm_ht_find_item_rcu drm_ht_find_item
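/*
 * Illustrative sketch (not part of the original header): under the rules
 * above, a lookup may run concurrently with a single writer as long as the
 * reader stays inside an RCU read-side critical section; insertions and
 * removals must still be serialized by the caller (for instance under
 * dev->struct_mutex). The function name example_key_present() is
 * hypothetical and assumes <linux/rcupdate.h> is available.
 */
static inline bool example_key_present(struct drm_open_hash *ht,
				       unsigned long key)
{
	struct drm_hash_item *item;
	bool present;

	rcu_read_lock();
	present = drm_ht_find_item_rcu(ht, key, &item) == 0;
	rcu_read_unlock();

	return present;
}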
/*
* Generic DRM Contexts
*/
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
#else
static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
#endif
void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
#endif
/*
* Generic Buffer Management
*/
#define DRM_MAP_HASH_OFFSET 0x10000000
#if IS_ENABLED(CONFIG_DRM_LEGACY)
static inline int drm_legacy_create_map_hash(struct drm_device *dev)
{
return drm_ht_create(&dev->map_hash, 12);
}
static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
{
drm_ht_remove(&dev->map_hash);
}
#else
static inline int drm_legacy_create_map_hash(struct drm_device *dev)
{
return 0;
}
static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
#endif
int __drm_legacy_infobufs(struct drm_device *, void *, int *,
int (*)(void *, int, struct drm_buf_entry *));
int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
void __user **,
int (*)(void *, int, unsigned long, struct drm_buf *),
struct drm_file *);
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_master_rmmaps(struct drm_device *dev,
struct drm_master *master);
void drm_legacy_rmmaps(struct drm_device *dev);
#else
static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
struct drm_master *master) {}
static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
{
/* do nothing */
}
#endif
/*
* AGP Support
*/
struct drm_agp_mem {
unsigned long handle;
struct agp_memory *memory;
unsigned long bound;
int pages;
struct list_head head;
};
/* drm_agpsupport.c */
#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
void drm_legacy_agp_clear(struct drm_device *dev);
int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#else
static inline void drm_legacy_agp_clear(struct drm_device *dev) {}
#endif
/* drm_lock.c */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
#else
static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
#endif
/* DMA support */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_dma_setup(struct drm_device *dev);
void drm_legacy_dma_takedown(struct drm_device *dev);
#else
static inline int drm_legacy_dma_setup(struct drm_device *dev)
{
return 0;
}
#endif
void drm_legacy_free_buffer(struct drm_device *dev,
struct drm_buf * buf);
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp);
#else
static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp) {}
#endif
/* Scatter Gather Support */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_sg_cleanup(struct drm_device *dev);
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_init_members(struct drm_device *dev);
void drm_legacy_destroy_members(struct drm_device *dev);
void drm_legacy_dev_reinit(struct drm_device *dev);
int drm_legacy_setup(struct drm_device * dev);
#else
static inline void drm_legacy_init_members(struct drm_device *dev) {}
static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
static inline int drm_legacy_setup(struct drm_device * dev) { return 0; }
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
#else
static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_master_legacy_init(struct drm_master *master);
#else
static inline void drm_master_legacy_init(struct drm_master *master) {}
#endif
/* drm_pci.c */
#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv);
void drm_legacy_pci_agp_destroy(struct drm_device *dev);
#else
static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -EINVAL;
}
static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
#endif
#endif /* __DRM_LEGACY_H__ */
/*
* \file drm_legacy_misc.c
* Misc legacy support functions.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
void drm_legacy_init_members(struct drm_device *dev)
{
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
spin_lock_init(&dev->buf_lock);
mutex_init(&dev->ctxlist_mutex);
}
void drm_legacy_destroy_members(struct drm_device *dev)
{
mutex_destroy(&dev->ctxlist_mutex);
}
int drm_legacy_setup(struct drm_device * dev)
{
int ret;
if (dev->driver->firstopen &&
drm_core_check_feature(dev, DRIVER_LEGACY)) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
ret = drm_legacy_dma_setup(dev);
if (ret < 0)
return ret;
DRM_DEBUG("\n");
return 0;
}
void drm_legacy_dev_reinit(struct drm_device *dev)
{
if (dev->irq_enabled)
drm_legacy_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
drm_legacy_agp_clear(dev);
drm_legacy_sg_cleanup(dev);
drm_legacy_vma_flush(dev);
drm_legacy_dma_takedown(dev);
mutex_unlock(&dev->struct_mutex);
dev->sigdata.lock = NULL;
dev->context_flag = 0;
dev->last_context = 0;
dev->if_version = 0;
DRM_DEBUG("lastclose completed\n");
}
void drm_master_legacy_init(struct drm_master *master)
{
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
}
/*
* \file drm_lock.c
* IOCTLs for locking
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
/*
* Take the heavyweight lock.
*
* \param lock lock pointer.
* \param context locking context.
* \return one if the lock is held, or zero otherwise.
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock_bh(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else {
new = context | _DRM_LOCK_HELD |
((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
_DRM_LOCK_CONT : 0);
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
spin_unlock_bh(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
/*
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
*
* \param dev DRM device.
* \param lock lock pointer.
* \param context locking context.
* \return always one.
*
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
static int drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
prev = cmpxchg(lock, old, new);
} while (prev != old);
return 1;
}
static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock_bh(&lock_data->spinlock);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
spin_unlock_bh(&lock_data->spinlock);
return 1;
}
spin_unlock_bh(&lock_data->spinlock);
do {
old = *lock;
new = _DRM_LOCKING_CONTEXT(old);
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
/*
* Lock ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
 * Add the current task to the lock wait queue, and attempt to take the lock.
*/
int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
lock->flags);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
spin_unlock_bh(&master->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!master->lock.hw_lock) {
/* Device has been unregistered */
send_sig(SIGTERM, current, 0);
ret = -EINTR;
break;
}
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
break; /* Got lock */
}
/* Contention */
mutex_unlock(&drm_global_mutex);
schedule();
mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters--;
spin_unlock_bh(&master->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&master->lock.lock_queue, &entry);
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
	if (ret)
		return ret;
	/* Don't block all signals on the master process for now.
	 * This is probably not the correct answer, but it lets us debug the
	 * xkb X server for now.
	 */
if (!drm_is_current_master(file_priv)) {
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
}
	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
if (dev->driver->dma_quiescent(dev)) {
DRM_DEBUG("%d waiting for DMA quiescent\n",
lock->context);
return -EBUSY;
}
}
return 0;
}
/*
* Unlock ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Transfer and free the lock.
*/
int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
return -EINVAL;
}
if (drm_legacy_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
return 0;
}
/*
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
*
* This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
* by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
* a deadlock, which is why the "idlelock" was invented).
*
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
spin_unlock_bh(&lock_data->spinlock);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
spin_lock_bh(&lock_data->spinlock);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_take);
void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock_bh(&lock_data->spinlock);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
old = *lock;
prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
} while (prev != old);
wake_up_interruptible(&lock_data->lock_queue);
lock_data->idle_has_lock = 0;
}
}
spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);
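/*
 * Illustrative sketch (not part of the original drm_lock.c): a driver that
 * needs the hardware idle without racing userspace for the heavyweight lock
 * can bracket the wait with the idlelock as described above. The function
 * name and the driver_wait_engines_idle() hook are hypothetical.
 */
static void example_wait_for_gpu_idle(struct drm_master *master)
{
	drm_legacy_idlelock_take(&master->lock);

	/*
	 * The kernel context now either holds the hardware lock or is handed
	 * it as soon as the current holder releases it, so the driver can
	 * safely poll its engines here:
	 *
	 *	driver_wait_engines_idle();
	 */

	drm_legacy_idlelock_release(&master->lock);
}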
static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
/* if the master has gone away we can't do anything with the lock */
if (!dev->master)
return;
if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_legacy_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
/*
* Since the master is disappearing, so is the
* possibility to lock.
*/
mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL;
master->lock.file_priv = NULL;
wake_up_interruptible_all(&master->lock.lock_queue);
}
mutex_unlock(&dev->struct_mutex);
}
/*
* \file drm_memory.c
* Memory management wrappers for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include "drm_legacy.h"
#if IS_ENABLED(CONFIG_AGP)
#ifdef HAVE_PAGE_AGP
# include <asm/agp.h>
#else
# ifdef __powerpc__
# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
#endif
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
{
unsigned long i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
struct page **phys_page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (&agpmem->head == &dev->agp->memory)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
if (!page_map)
return NULL;
phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
for (i = 0; i < num_pages; ++i)
page_map[i] = phys_page_map[i];
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
{
return NULL;
}
#endif /* CONFIG_AGP */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap_wc);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
return;
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
......@@ -35,13 +35,6 @@
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
#ifdef CONFIG_DRM_LEGACY
/* List of devices hanging off drivers with stealth attach. */
static LIST_HEAD(legacy_dev_list);
static DEFINE_MUTEX(legacy_dev_list_lock);
#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
......@@ -72,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
master->unique_len = strlen(master->unique);
return 0;
}
#ifdef CONFIG_DRM_LEGACY
static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != pdev->bus->number ||
p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
return -EINVAL;
p->irq = pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
/**
* drm_legacy_irq_by_busid - Get interrupt from bus ID
* @dev: DRM device
* @data: IOCTL parameter pointing to a drm_irq_busid structure
* @file_priv: DRM file private.
*
* Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance is attached to.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
	/* UMS was only ever supported on PCI devices. */
if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EOPNOTSUPP;
return drm_legacy_pci_irq_by_busid(dev, p);
}
void drm_legacy_pci_agp_destroy(struct drm_device *dev)
{
if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_legacy_agp_clear(dev);
kfree(dev->agp);
dev->agp = NULL;
}
}
static void drm_legacy_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
dev->agp = drm_legacy_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024);
}
}
}
static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
dev = drm_dev_alloc(driver, &pdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
ret = pci_enable_device(pdev);
if (ret)
goto err_free;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
drm_legacy_pci_agp_init(dev);
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
goto err_agp;
if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
mutex_lock(&legacy_dev_list_lock);
list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
mutex_unlock(&legacy_dev_list_lock);
}
return 0;
err_agp:
drm_legacy_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
drm_dev_put(dev);
return ret;
}
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
* This is only used by legacy dri1 drivers and deprecated.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
DRM_DEBUG("\n");
if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
* function that pci_get_subsys and pci_get_class used, we'd
* be able to just pass pid in instead of doing a two-stage
* thing.
*/
pdev = NULL;
while ((pdev =
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev)) != NULL) {
if ((pdev->class & pid->class_mask) != pid->class)
continue;
/* stealth mode requires a manual probe */
pci_dev_get(pdev);
drm_legacy_get_pci_dev(pdev, pid, driver);
}
}
return 0;
}
EXPORT_SYMBOL(drm_legacy_pci_init);
/**
* drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
* Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
* is deprecated and only used by dri1 drivers.
*/
void drm_legacy_pci_exit(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (!(driver->driver_features & DRIVER_LEGACY)) {
WARN_ON(1);
} else {
mutex_lock(&legacy_dev_list_lock);
list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
legacy_dev_list) {
if (dev->driver == driver) {
list_del(&dev->legacy_dev_list);
drm_put_dev(dev);
}
}
mutex_unlock(&legacy_dev_list_lock);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);
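/*
 * Illustrative sketch (not part of the original drm_pci.c): a shadow-attached
 * legacy driver registers and unregisters itself roughly like this from its
 * own module. The example_* names, the PCI ID and the feature flags are
 * hypothetical; a real driver also fills in fops, name and ioctl tables.
 */
static const struct pci_device_id example_pciidlist[] = {
	{ PCI_DEVICE(0x1002, 0x4c57) },	/* hypothetical device ID */
	{ }
};

static struct drm_driver example_driver = {
	.driver_features = DRIVER_LEGACY | DRIVER_HAVE_DMA,
	/* .fops, .name, .ioctls, ... */
};

static struct pci_driver example_pci_driver = {
	.name = "example_legacy",
	.id_table = example_pciidlist,
};

static int __init example_init(void)
{
	return drm_legacy_pci_init(&example_driver, &example_pci_driver);
}

static void __exit example_exit(void)
{
	drm_legacy_pci_exit(&example_driver, &example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);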
#endif
/*
* \file drm_scatter.c
* IOCTLs to manage scatter/gather memory
*
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
#define DEBUG_SCATTER 0
static void drm_sg_cleanup(struct drm_sg_mem *entry)
{
struct page *page;
int i;
for (i = 0; i < entry->pages; i++) {
page = entry->pagelist[i];
if (page)
ClearPageReserved(page);
}
vfree(entry->virtual);
kfree(entry->busaddr);
kfree(entry->pagelist);
kfree(entry);
}
void drm_legacy_sg_cleanup(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
drm_core_check_feature(dev, DRIVER_LEGACY)) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
}
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
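/*
 * Editor's note: ScatterHandle() folds a 64-bit kernel-virtual address into a
 * 32-bit handle by adding its high and low halves; for an illustrative value
 * x = 0x0000000100002000 this gives (0x1 + 0x00002000) = 0x00002001. On 32-bit
 * kernels the address already fits and is used as-is. The handle is only a
 * lookup key that userspace later hands back through the SG_FREE ioctl.
 */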
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
if (request->size > SIZE_MAX - PAGE_SIZE)
return -EINVAL;
if (dev->sg)
return -EINVAL;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
if (!entry->pagelist) {
kfree(entry);
return -ENOMEM;
}
entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
if (!entry->busaddr) {
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
/* This also forces the mapping of COW pages, so our page list
* will be valid. Please don't remove it...
*/
memset(entry->virtual, 0, pages << PAGE_SHIFT);
entry->handle = ScatterHandle((unsigned long)entry->virtual);
DRM_DEBUG("handle = %08lx\n", entry->handle);
DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])
goto failed;
SetPageReserved(entry->pagelist[j]);
}
request->handle = entry->handle;
dev->sg = entry;
#if DEBUG_SCATTER
/* Verify that each page points to its virtual address, and vice
* versa.
*/
{
int error = 0;
for (i = 0; i < pages; i++) {
unsigned long *tmp;
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0xcafebabe;
}
tmp = (unsigned long *)((u8 *) entry->virtual +
(PAGE_SIZE * i));
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
if (*tmp != 0xcafebabe && error == 0) {
error = 1;
DRM_ERROR("Scatter allocation error, "
"pagelist does not match "
"virtual mapping\n");
}
}
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0;
}
}
if (error == 0)
DRM_ERROR("Scatter allocation matches pagelist\n");
}
#endif
return 0;
failed:
drm_sg_cleanup(entry);
return -ENOMEM;
}
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
entry = dev->sg;
dev->sg = NULL;
if (!entry || entry->handle != request->handle)
return -EINVAL;
DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
return 0;
}
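/*
 * Editor's note: a minimal userspace sketch of the two ioctls handled above,
 * assuming a DRI1-capable node at /dev/dri/card0. struct drm_scatter_gather
 * and the DRM_IOCTL_SG_* numbers come from the UAPI drm.h; the include path
 * may differ between kernel-header and libdrm installs.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int sg_alloc_free_example(void)
{
	struct drm_scatter_gather sg = { .size = 1 << 20 };	/* 1 MiB, rounded up to pages */
	int fd = open("/dev/dri/card0", O_RDWR);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg) == 0) {	/* kernel fills sg.handle */
		/* sg.handle would normally feed an addmap/mmap sequence here */
		ret = ioctl(fd, DRM_IOCTL_SG_FREE, &sg);
	}
	close(fd);
	return ret;
}
#endif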
/*
* \file drm_vm.c
* Memory mapping for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
pid_t pid;
};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static pgprot_t drm_io_prot(struct drm_local_map *map,
struct vm_area_struct *vma)
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__) || defined(__loongarch__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
else
tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
tmp = pgprot_noncached_wc(tmp);
#endif
return tmp;
}
/*
* \c fault method for AGP virtual memory.
*
 * \param vmf fault information, including the faulting address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on failure.
*
* Find the right map and if it's AGP memory find the real physical page to
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list;
struct drm_hash_item *hash;
/*
* Find the right map
*/
if (!dev->agp || !dev->agp->cant_use_aperture)
goto vm_fault_error;
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_fault_error;
r_list = drm_hash_entry(hash, struct drm_map_list, hash);
map = r_list->map;
if (map && map->type == _DRM_AGP) {
/*
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
resource_size_t offset = vmf->address - vma->vm_start;
resource_size_t baddr = map->offset + offset;
struct drm_agp_mem *agpmem;
struct page *page;
#ifdef __alpha__
/*
* Adjust to a bus-relative address
*/
baddr -= dev->hose->mem_space->start;
#endif
/*
* It's AGP memory - find the real physical page to map
*/
list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
}
if (&agpmem->head == &dev->agp->memory)
goto vm_fault_error;
/*
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
page = agpmem->memory->pages[offset];
get_page(page);
vmf->page = page;
DRM_DEBUG
("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
(unsigned long long)baddr,
agpmem->memory->pages[offset],
(unsigned long long)offset,
page_count(page));
return 0;
}
vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
#endif
/*
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on failure.
*
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
if (!map)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = vmf->address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
return VM_FAULT_SIGBUS;
get_page(page);
vmf->page = page;
DRM_DEBUG("shm_fault 0x%lx\n", offset);
return 0;
}
/*
* \c close method for shared virtual memory.
*
* \param vma virtual memory area.
*
* Deletes map information if we are the last
* person to close a mapping and it's not in the global maplist.
*/
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
struct drm_local_map *map;
struct drm_map_list *r_list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
list_del(&pt->head);
kfree(pt);
}
}
/* This vma was the only mapping of the map that was found */
if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
/* Check to see if we are in the maplist; if we are not, then
 * we delete this mapping's information.
 */
found_maps = 0;
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
if (!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
arch_phys_wc_del(map->mtrr);
iounmap(map->handle);
break;
case _DRM_SHM:
vfree(map->handle);
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
break;
}
kfree(map);
}
}
mutex_unlock(&dev->struct_mutex);
}
/*
* \c fault method for DMA virtual memory.
*
 * \param vmf fault information, including the faulting address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on failure.
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_device_dma *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
if (!dma)
return VM_FAULT_SIGBUS; /* Error */
if (!dma->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = vmf->address - vma->vm_start;
/* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((void *)dma->pagelist[page_nr]);
get_page(page);
vmf->page = page;
DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
return 0;
}
/*
* \c fault method for scatter-gather virtual memory.
*
 * \param vmf fault information, including the faulting address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on failure.
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_sg_mem *entry = dev->sg;
unsigned long offset;
unsigned long map_offset;
unsigned long page_offset;
struct page *page;
if (!entry)
return VM_FAULT_SIGBUS; /* Error */
if (!entry->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = vmf->address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
get_page(page);
vmf->page = page;
return 0;
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
.fault = drm_vm_fault,
.open = drm_vm_open,
.close = drm_vm_close,
};
/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
.fault = drm_vm_shm_fault,
.open = drm_vm_open,
.close = drm_vm_shm_close,
};
/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
.fault = drm_vm_dma_fault,
.open = drm_vm_open,
.close = drm_vm_close,
};
/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
.fault = drm_vm_sg_fault,
.open = drm_vm_open,
.close = drm_vm_close,
};
static void drm_vm_open_locked(struct drm_device *dev,
struct vm_area_struct *vma)
{
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
if (vma_entry) {
vma_entry->vma = vma;
vma_entry->pid = current->pid;
list_add(&vma_entry->head, &dev->vmalist);
}
}
static void drm_vm_open(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
static void drm_vm_close_locked(struct drm_device *dev,
struct vm_area_struct *vma)
{
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
list_del(&pt->head);
kfree(pt);
break;
}
}
}
/*
* \c close method for all virtual memory types.
*
* \param vma virtual memory area.
*
* Search the \p vma private data entry in drm_device::vmalist, unlink it, and
* free it.
*/
static void drm_vm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
/*
* mmap DMA memory.
*
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, clears
 * write access for read-only mappings, and calls drm_vm_open_locked().
*/
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev;
struct drm_device_dma *dma;
unsigned long length = vma->vm_end - vma->vm_start;
dev = priv->minor->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
return -EINVAL;
}
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot =
__pgprot(pte_val
(pte_wrprotect
(__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
vma->vm_ops = &drm_vm_dma_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
}
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
return dev->hose->dense_mem_base;
#else
return 0;
#endif
}
/*
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so this calls drm_mmap_dma(). Otherwise it looks the map up in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally it calls drm_vm_open_locked().
*/
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_local_map *map = NULL;
resource_size_t offset = 0;
struct drm_hash_item *hash;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
if (!priv->authenticated)
return -EACCES;
/* We check for "dma". On Apple's UniNorth, it's valid to have
* the AGP mapped at physical address 0
* --BenH.
*/
if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
&& (!dev->agp
|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
)
return drm_mmap_dma(filp, vma);
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
DRM_ERROR("Could not find map\n");
return -EINVAL;
}
map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot =
__pgprot(pte_val
(pte_wrprotect
(__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
switch (map->type) {
#if !defined(__arm__)
case _DRM_AGP:
if (dev->agp && dev->agp->cant_use_aperture) {
/*
 * On some platforms the CPU cannot access AGP bus addresses directly, so
 * for memory of type _DRM_AGP we sort out the real physical pages and
 * mappings in fault().
 */
#if defined(__powerpc__)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
vma->vm_ops = &drm_vm_ops;
break;
}
fallthrough; /* to _DRM_FRAME_BUFFER... */
#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
vma->vm_page_prot = drm_io_prot(map, vma);
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%llx\n",
map->type,
vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_CONSISTENT:
/* Consistent memory is really like shared memory. But
* it's allocated in a different way, so avoid fault */
if (remap_pfn_range(vma, vma->vm_start,
page_to_pfn(virt_to_page(map->handle)),
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
fallthrough; /* to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
default:
return -EINVAL; /* This should never happen. */
}
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
}
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
int ret;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
ret = drm_mmap_locked(filp, vma);
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
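/*
 * Editor's note: userspace reached drm_mmap_locked() through plain mmap(2) on
 * the DRM node, passing the map's user token as the file offset; the token is
 * what the legacy addmap ioctl publishes and what the map_hash lookup above
 * keys on. A minimal sketch, assuming the token has already been obtained:
 */
#if 0	/* illustrative only */
#include <sys/mman.h>

static void *map_legacy_region(int fd, unsigned long token, size_t size)
{
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, token);

	return addr == MAP_FAILED ? NULL : addr;
}
#endif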
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
struct drm_vma_entry *vma, *vma_temp;
/* Clear vma list (only needed for legacy drivers) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
}
#endif
......@@ -33,24 +33,6 @@
#include <linux/wait.h>
struct drm_file;
struct drm_hw_lock;
/*
* Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
* include ordering reasons.
*
* DO NOT USE.
*/
struct drm_lock_data {
struct drm_hw_lock *hw_lock;
struct drm_file *file_priv;
wait_queue_head_t lock_queue;
unsigned long lock_time;
spinlock_t spinlock;
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
};
/**
* struct drm_master - drm master structure
......@@ -145,10 +127,6 @@ struct drm_master {
* Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
*/
struct idr lessee_idr;
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
struct drm_lock_data lock;
#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
......
......@@ -6,7 +6,6 @@
#include <linux/mutex.h>
#include <linux/idr.h>
#include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
......@@ -153,8 +152,8 @@ struct drm_device {
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
*
* WARNING:
* Only drivers annotated with DRIVER_LEGACY should be using this.
* TODO: This lock used to be the BKL of the DRM subsystem. Move the
* lock into i915, which is the only remaining user.
*/
struct mutex struct_mutex;
......@@ -317,72 +316,6 @@ struct drm_device {
* Root directory for debugfs files.
*/
struct dentry *debugfs_root;
/* Everything below here is for legacy driver, never use! */
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
/* List of devices per driver for stealth attach cleanup */
struct list_head legacy_dev_list;
#ifdef __alpha__
/** @hose: PCI hose, only used on ALPHA platforms. */
struct pci_controller *hose;
#endif
/* AGP data */
struct drm_agp_head *agp;
/* Context handle management - linked list of context handles */
struct list_head ctxlist;
/* Context handle management - mutex for &ctxlist */
struct mutex ctxlist_mutex;
/* Context handle management */
struct idr ctx_idr;
/* Memory management - linked list of regions */
struct list_head maplist;
/* Memory management - user token hash table for maps */
struct drm_open_hash map_hash;
/* Context handle management - list of vmas (for debugging) */
struct list_head vmalist;
/* Optional pointer for DMA support */
struct drm_device_dma *dma;
/* Context swapping flag */
__volatile__ long context_flag;
/* Last current context */
int last_context;
/* Lock for &buf_use and a few other things. */
spinlock_t buf_lock;
/* Usage counter for buffers in use -- cannot alloc */
int buf_use;
/* Buffer allocation in progress */
atomic_t buf_alloc;
struct {
int context;
struct drm_hw_lock *lock;
} sigdata;
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
/* Scatter gather memory */
struct drm_sg_mem *sg;
/* IRQs */
bool irq_enabled;
int irq;
#endif
};
#endif
......@@ -442,25 +442,6 @@ struct drm_driver {
* some examples.
*/
const struct file_operations *fops;
#ifdef CONFIG_DRM_LEGACY
/* Everything below here is for legacy driver, never use! */
/* private: */
int (*firstopen) (struct drm_device *);
void (*preclose) (struct drm_device *, struct drm_file *file_priv);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
irqreturn_t (*irq_handler)(int irq, void *arg);
void (*irq_preinstall)(struct drm_device *dev);
int (*irq_postinstall)(struct drm_device *dev);
void (*irq_uninstall)(struct drm_device *dev);
u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
int dev_priv_size;
#endif
};
void *__devm_drm_dev_alloc(struct device *parent,
......
......@@ -386,11 +386,6 @@ struct drm_file {
* Per-file buffer caches used by the PRIME buffer sharing code.
*/
struct drm_prime_file_private prime;
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
unsigned long lock_count; /* DRI1 legacy lock count */
#endif
};
/**
......
#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
/*
* Legacy driver interfaces for the Direct Rendering Manager
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright (c) 2009-2010, Code Aurora Forum.
* All rights reserved.
* Copyright © 2014 Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
* Author: Gareth Hughes <gareth@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/agp_backend.h>
#include <drm/drm.h>
#include <drm/drm_auth.h>
struct drm_device;
struct drm_driver;
struct file;
struct pci_driver;
/*
 * Legacy Support for paleontologic DRM drivers
*
* If you add a new driver and it uses any of these functions or structures,
* you're doing it terribly wrong.
*/
/*
* Hash-table Support
*/
struct drm_hash_item {
struct hlist_node head;
unsigned long key;
};
struct drm_open_hash {
struct hlist_head *table;
u8 order;
};
/**
* DMA buffer.
*/
struct drm_buf {
int idx; /**< Index into master buflist */
int total; /**< Buffer size */
int order; /**< log-base-2(total) */
int used; /**< Amount of buffer in use (for DMA) */
unsigned long offset; /**< Byte offset (used internally) */
void *address; /**< Address of buffer */
unsigned long bus_address; /**< Bus address of buffer */
struct drm_buf *next; /**< Kernel-only: used for free list */
__volatile__ int waiting; /**< On kernel DMA queue */
__volatile__ int pending; /**< On hardware DMA queue */
struct drm_file *file_priv; /**< Private of holding file descr */
int context; /**< Kernel queue for this buffer */
int while_locked; /**< Dispatch this buffer while locked */
enum {
DRM_LIST_NONE = 0,
DRM_LIST_FREE = 1,
DRM_LIST_WAIT = 2,
DRM_LIST_PEND = 3,
DRM_LIST_PRIO = 4,
DRM_LIST_RECLAIM = 5
} list; /**< Which list we're on */
int dev_priv_size; /**< Size of buffer private storage */
void *dev_private; /**< Per-buffer private storage */
};
typedef struct drm_dma_handle {
dma_addr_t busaddr;
void *vaddr;
size_t size;
} drm_dma_handle_t;
/**
 * Buffer entry. There is one of these for each buffer size order.
*/
struct drm_buf_entry {
int buf_size; /**< size */
int buf_count; /**< number of buffers */
struct drm_buf *buflist; /**< buffer list */
int seg_count;
int page_order;
struct drm_dma_handle **seglist;
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
};
/**
* DMA data.
*/
struct drm_device_dma {
struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
int buf_count; /**< total number of buffers */
struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
int seg_count;
int page_count; /**< number of pages */
unsigned long *pagelist; /**< page list */
unsigned long byte_count;
enum {
_DRM_DMA_USE_AGP = 0x01,
_DRM_DMA_USE_SG = 0x02,
_DRM_DMA_USE_FB = 0x04,
_DRM_DMA_USE_PCI_RO = 0x08
} flags;
};
/**
* Scatter-gather memory.
*/
struct drm_sg_mem {
unsigned long handle;
void *virtual;
int pages;
struct page **pagelist;
dma_addr_t *busaddr;
};
/**
* Kernel side of a mapping
*/
struct drm_local_map {
dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
};
typedef struct drm_local_map drm_local_map_t;
/**
* Mappings list
*/
struct drm_map_list {
struct list_head head; /**< list head */
struct drm_hash_item hash;
struct drm_local_map *map; /**< mapping */
uint64_t user_token;
struct drm_master *master;
};
int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_p);
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
/**
 * Test that the hardware lock is held by the caller, returning -EINVAL from
 * the enclosing function otherwise.
 *
 * \param dev DRM device.
 * \param _file_priv DRM file private of the caller.
*/
#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
do { \
if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
_file_priv->master->lock.file_priv != _file_priv) { \
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
__func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
_file_priv->master->lock.file_priv, _file_priv); \
return -EINVAL; \
} \
} while (0)
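/*
 * Editor's note: a sketch of how a DRI1 driver ioctl typically opened with the
 * macro above before touching the hardware; the handler name and body are
 * hypothetical.
 */
#if 0	/* illustrative only */
static int example_dma_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);	/* bail out unless we hold the HW lock */

	/* ... queue DMA buffers while the heavyweight lock is held ... */
	return 0;
}
#endif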
void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);
/* drm_irq.c */
int drm_legacy_irq_uninstall(struct drm_device *dev);
/* drm_pci.c */
#ifdef CONFIG_PCI
int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver);
void drm_legacy_pci_exit(const struct drm_driver *driver,
struct pci_driver *pdriver);
#else
static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
size_t size, size_t align)
{
return NULL;
}
static inline void drm_pci_free(struct drm_device *dev,
struct drm_dma_handle *dmah)
{
}
static inline int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
return -EINVAL;
}
static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
struct pci_driver *pdriver)
{
}
#endif
/*
* AGP Support
*/
struct drm_agp_head {
struct agp_kern_info agp_info;
struct list_head memory;
unsigned long mode;
struct agp_bridge_data *bridge;
int enabled;
int acquired;
unsigned long base;
int agp_mtrr;
int cant_use_aperture;
unsigned long page_mask;
};
#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
int drm_legacy_agp_acquire(struct drm_device *dev);
int drm_legacy_agp_release(struct drm_device *dev);
int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
#else
static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
return NULL;
}
static inline int drm_legacy_agp_acquire(struct drm_device *dev)
{
return -ENODEV;
}
static inline int drm_legacy_agp_release(struct drm_device *dev)
{
return -ENODEV;
}
static inline int drm_legacy_agp_enable(struct drm_device *dev,
struct drm_agp_mode mode)
{
return -ENODEV;
}
static inline int drm_legacy_agp_info(struct drm_device *dev,
struct drm_agp_info *info)
{
return -ENODEV;
}
static inline int drm_legacy_agp_alloc(struct drm_device *dev,
struct drm_agp_buffer *request)
{
return -ENODEV;
}
static inline int drm_legacy_agp_free(struct drm_device *dev,
struct drm_agp_buffer *request)
{
return -ENODEV;
}
static inline int drm_legacy_agp_unbind(struct drm_device *dev,
struct drm_agp_binding *request)
{
return -ENODEV;
}
static inline int drm_legacy_agp_bind(struct drm_device *dev,
struct drm_agp_binding *request)
{
return -ENODEV;
}
#endif
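/*
 * Editor's note: the usual bring-up order a DRI1 driver followed with the AGP
 * helpers declared above; the mode value is hypothetical and error unwinding
 * is abbreviated.
 */
#if 0	/* illustrative only */
static int example_agp_setup(struct drm_device *dev)
{
	struct drm_agp_mode mode = { .mode = 0 };	/* hypothetical: keep the chipset default */
	int ret;

	dev->agp = drm_legacy_agp_init(dev);	/* probe the AGP bridge, may return NULL */
	if (!dev->agp)
		return -ENODEV;

	ret = drm_legacy_agp_acquire(dev);	/* take ownership of the bridge */
	if (ret)
		return ret;

	return drm_legacy_agp_enable(dev, mode);	/* program the AGP mode register */
}
#endif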
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
#endif /* __DRM_DRM_LEGACY_H__ */