Commit 6177c8bb authored by Linus Torvalds

Merge DRI CVS tree into standard kernel

parent a58a63c3
#
# drm device configuration
# Drm device configuration
#
# This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
......@@ -12,5 +12,6 @@ if [ "$CONFIG_DRM" != "n" ]; then
tristate ' ATI Rage 128' CONFIG_DRM_R128
dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP
dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
dep_tristate ' Intel 830M' CONFIG_DRM_I830 $CONFIG_AGP
dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
fi
......@@ -9,6 +9,7 @@ tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
......@@ -18,6 +19,7 @@ obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make
......@@ -104,9 +104,9 @@ typedef struct drm_tex_region {
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
#ifdef CONFIG_DRM_SIS
#include "sis_drm.h"
#endif
#include "i830_drm.h"
#include "gamma_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
......@@ -449,6 +449,12 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
......@@ -466,6 +472,7 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t)
/* Radeon specific ioctls */
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t)
......@@ -482,8 +489,15 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)
/* Gamma specific ioctls */
#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
#ifdef CONFIG_DRM_SIS
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
......@@ -493,6 +507,16 @@ typedef struct drm_scatter_gather {
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
#endif
/* I830 specific ioctls */
#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif
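/* Editorial sketch (not part of the patch): each driver's private ioctls
 * above start at command number 0x40 so they sit above the DRM core range,
 * and DRM_IO/DRM_IOW/DRM_IOR/DRM_IOWR encode direction and payload size with
 * the standard Linux _IO* macros.  A small user-space check of that layout,
 * assuming this drm.h is on the include path:
 */
#include <stdio.h>
#include "drm.h"

int main(void)
{
	/* DRM_IOCTL_NR() is provided by drm.h and recovers the 0x4c above */
	printf("DRM_IOCTL_I810_MC: cmd=0x%08lx nr=0x%02x\n",
	       (unsigned long)DRM_IOCTL_I810_MC,
	       DRM_IOCTL_NR(DRM_IOCTL_I810_MC));
	return 0;
}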
......@@ -66,27 +66,11 @@
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
#if LINUX_VERSION_CODE >= 0x020100 /* KERNEL_VERSION(2,1,0) */
#include <linux/tqueue.h>
#include <linux/poll.h>
#endif
#if LINUX_VERSION_CODE < 0x020400
#include "compat-pre24.h"
#endif
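/* Editorial note (not part of the patch): the hexadecimal LINUX_VERSION_CODE
 * constants used throughout these compatibility guards are KERNEL_VERSION()
 * values with one byte per field, so 0x020400 is 2.4.0, 0x020317 is 2.3.23
 * and 0x020333 is 2.3.51.  A sketch of the same guard written with the macro
 * from <linux/version.h>:
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)	/* identical to < 0x020400 */
#include "compat-pre24.h"
#endif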
#include <asm/pgalloc.h>
#include "drm.h"
/* page_to_bus for earlier kernels, not optimal in all cases */
#ifndef page_to_bus
#define page_to_bus(page) ((unsigned int)(virt_to_bus(page_address(page))))
#endif
/* We just use virt_to_bus for pci_map_single on older kernels */
#if LINUX_VERSION_CODE < 0x020400
#define pci_map_single(hwdev, ptr, size, direction) virt_to_bus(ptr)
#define pci_unmap_single(hwdev, dma_addr, size, direction)
#endif
/* DRM template customization defaults
*/
#ifndef __HAVE_AGP
......@@ -161,179 +145,57 @@
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
/* Backward compatibility section */
/* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
#ifndef _PAGE_PWT
#define _PAGE_PWT _PAGE_WT
#endif
/* Wait queue declarations changed in 2.3.1 */
#ifndef DECLARE_WAITQUEUE
#define DECLARE_WAITQUEUE(w,c) struct wait_queue w = { c, NULL }
typedef struct wait_queue *wait_queue_head_t;
#define init_waitqueue_head(q) *q = NULL;
#endif
/* _PAGE_4M changed to _PAGE_PSE in 2.3.23 */
#ifndef _PAGE_PSE
#define _PAGE_PSE _PAGE_4M
#ifndef minor
#define minor(x) MINOR((x))
#endif
/* vm_offset changed to vm_pgoff in 2.3.25 */
#if LINUX_VERSION_CODE < 0x020319
#define VM_OFFSET(vma) ((vma)->vm_offset)
#else
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(x)
#endif
/* *_nopage return values defined in 2.3.26 */
#ifndef NOPAGE_SIGBUS
#define NOPAGE_SIGBUS 0
#endif
#ifndef NOPAGE_OOM
#define NOPAGE_OOM 0
#ifndef preempt_disable
#define preempt_disable()
#define preempt_enable()
#endif
/* module_init/module_exit added in 2.3.13 */
#ifndef module_init
#define module_init(x) int init_module(void) { return x(); }
#endif
#ifndef module_exit
#define module_exit(x) void cleanup_module(void) { x(); }
#ifndef pte_offset_map
#define pte_offset_map pte_offset
#define pte_unmap(pte)
#endif
/* Generic cmpxchg added in 2.3.x */
#ifndef __HAVE_ARCH_CMPXCHG
/* Include this here so that the driver can be
used with older kernels. */
#if defined(__alpha__)
static __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
#if LINUX_VERSION_CODE < 0x020500
static inline struct page * vmalloc_to_page(void * vmalloc_addr)
{
unsigned long prev, cmp;
__asm__ __volatile__(
"1: ldl_l %0,%5\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,3f\n"
"2: mb\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m)
: "memory" );
return prev;
}
static __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
unsigned long prev, cmp;
__asm__ __volatile__(
"1: ldq_l %0,%5\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,3f\n"
"2: mb\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m)
: "memory" );
return prev;
}
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
return __cmpxchg_u64(ptr, old, new);
unsigned long addr = (unsigned long) vmalloc_addr;
struct page *page = NULL;
pgd_t *pgd = pgd_offset_k(addr);
pmd_t *pmd;
pte_t *ptep, pte;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, addr);
if (!pmd_none(*pmd)) {
preempt_disable();
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
preempt_enable();
}
}
return old;
return page;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#elif __i386__
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
#elif defined(__powerpc__)
extern void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 4:
__asm__ __volatile__(
"sync;"
"0: lwarx %0,0,%1 ;"
" cmpl 0,%0,%3;"
" bne 1f;"
" stwcx. %2,0,%1;"
" bne- 0b;"
"1: "
"sync;"
: "=&r"(prev)
: "r"(ptr), "r"(new), "r"(old)
: "cr0", "memory");
return prev;
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#endif /* i386, powerpc & alpha */
#endif
#ifndef __alpha__
#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
(unsigned long)(n),sizeof(*(ptr))))
#if LINUX_VERSION_CODE < 0x020500
#define DRM_RPR_ARG(vma)
#else
#define DRM_RPR_ARG(vma) vma,
#endif
#endif /* !__HAVE_ARCH_CMPXCHG */
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
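/* Editorial sketch (not part of the patch): the vmalloc_to_page() fallback
 * added above recovers the struct page behind a vmalloc'd kernel address by
 * walking pgd -> pmd -> pte.  A hypothetical caller (names below are
 * illustrative, not taken from the driver) that builds a page list for a
 * vmalloc'd buffer:
 */
static int example_fill_pagelist(void *vaddr, struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		pages[i] = vmalloc_to_page((char *)vaddr + i * PAGE_SIZE);
		if (!pages[i])
			return -ENOMEM;	/* no populated pte behind this address */
	}
	return 0;
}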
/* Macros to make printk easier */
#define DRM_ERROR(fmt, arg...) \
......@@ -369,6 +231,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define DRM_IOREMAP(map) \
(map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
#define DRM_IOREMAP_NOCACHE(map) \
(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size)
#define DRM_IOREMAPFREE(map) \
do { \
if ( (map)->handle && (map)->size ) \
......@@ -778,34 +643,18 @@ extern unsigned int DRM(poll)(struct file *filp,
struct poll_table_struct *wait);
/* Mapping support (drm_vm.h) */
#if LINUX_VERSION_CODE < 0x020317
extern unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
extern unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
extern unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
extern unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
#else
/* Return type changed in 2.3.23 */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
int write_access);
extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
int write_access);
extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
int write_access);
extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused);
#endif
int write_access);
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
......@@ -827,6 +676,7 @@ extern unsigned long DRM(alloc_pages)(int order, int area);
extern void DRM(free_pages)(unsigned long address, int order,
int area);
extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size);
extern void DRM(ioremapfree)(void *pt, unsigned long size);
#if __REALLY_HAVE_AGP
......
......@@ -35,12 +35,8 @@
#if __REALLY_HAVE_AGP
#if LINUX_VERSION_CODE < 0x020400
#include "agpsupport-pre24.h"
#else
#define DRM_AGP_GET (drm_agp_t *)inter_module_get("drm_agp")
#define DRM_AGP_PUT inter_module_put("drm_agp")
#endif
static const drm_agp_t *drm_agp = NULL;
......@@ -271,24 +267,24 @@ drm_agp_head_t *DRM(agp_init)(void)
case INTEL_GX: head->chipset = "Intel 440GX"; break;
case INTEL_I810: head->chipset = "Intel i810"; break;
#if LINUX_VERSION_CODE >= 0x020400
case INTEL_I815: head->chipset = "Intel i815"; break;
#if LINUX_VERSION_CODE >= 0x020415
case INTEL_I820: head->chipset = "Intel i820"; break;
#endif
case INTEL_I840: head->chipset = "Intel i840"; break;
#if LINUX_VERSION_CODE >= 0x020415
case INTEL_I845: head->chipset = "Intel i845"; break;
case INTEL_I850: head->chipset = "Intel i850"; break;
#endif
case INTEL_I850: head->chipset = "Intel i850"; break;
case VIA_GENERIC: head->chipset = "VIA"; break;
case VIA_VP3: head->chipset = "VIA VP3"; break;
case VIA_MVP3: head->chipset = "VIA MVP3"; break;
#if LINUX_VERSION_CODE >= 0x020400
case VIA_MVP4: head->chipset = "VIA MVP4"; break;
case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
break;
case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
break;
#endif
case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
break;
......
......@@ -64,6 +64,7 @@ int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
hash = DRM(hash_magic)(magic);
entry = DRM(alloc)(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry) return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
......
......@@ -229,11 +229,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
if (pt->vma->vm_pte == map) found_maps++;
#endif
}
if(!found_maps) {
......
......@@ -27,6 +27,10 @@
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/
#define __NO_VERSION__
......@@ -316,6 +320,10 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
#ifdef DRIVER_CTX_CTOR
if ( ctx.handle != DRM_KERNEL_CONTEXT )
DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
......@@ -390,6 +398,9 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
priv->remove_auth_on_close = 1;
}
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
#ifdef DRIVER_CTX_DTOR
DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
DRM(ctxbitmap_free)( dev, ctx.handle );
}
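/* Editorial sketch (not part of the patch): per the ChangeLog above, a driver
 * that needs per-context bookkeeping (the SiS memory manager is the motivating
 * case) defines DRIVER_CTX_CTOR/DRIVER_CTX_DTOR in its template header before
 * drm_context.h is pulled in.  The helper names below are hypothetical:
 */
static void example_ctx_ctor(int handle)
{
	/* hypothetical: set up per-context allocation tracking keyed by handle */
}

static void example_ctx_dtor(int handle)
{
	/* hypothetical: release whatever the context still owns */
}

#define DRIVER_CTX_CTOR(handle)	example_ctx_ctor(handle)
#define DRIVER_CTX_DTOR(handle)	example_ctx_dtor(handle)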
......
......@@ -598,6 +598,25 @@ int DRM(control)( struct inode *inode, struct file *filp,
}
}
#else
int DRM(control)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_control_t ctl;
if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
case DRM_INST_HANDLER:
case DRM_UNINST_HANDLER:
return 0;
default:
return -EINVAL;
}
}
#endif /* __HAVE_DMA_IRQ */
#endif /* __HAVE_DMA */
......@@ -113,7 +113,6 @@
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_FOPS
#if LINUX_VERSION_CODE >= 0x020400
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
owner: THIS_MODULE, \
......@@ -126,19 +125,6 @@ static struct file_operations DRM(fops) = { \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
#else
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
open: DRM(open), \
flush: DRM(flush), \
release: DRM(release), \
ioctl: DRM(ioctl), \
mmap: DRM(mmap), \
read: DRM(read), \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
#endif
#endif
......@@ -201,10 +187,8 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
/* The DRM_IOCTL_DMA ioctl should be defined by the driver.
*/
#if __HAVE_DMA_IRQ
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
#endif
#endif
#if __REALLY_HAVE_AGP
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
......@@ -732,9 +716,6 @@ int DRM(open)( struct inode *inode, struct file *filp )
retcode = DRM(open_helper)( inode, filp, dev );
if ( !retcode ) {
#if LINUX_VERSION_CODE < 0x020333
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
spin_lock( &dev->count_lock );
if ( !dev->open_count++ ) {
......@@ -853,9 +834,6 @@ int DRM(release)( struct inode *inode, struct file *filp )
* End inline drm_release
*/
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
spin_lock( &dev->count_lock );
if ( !--dev->open_count ) {
......
......@@ -191,24 +191,8 @@ int DRM(write_string)(drm_device_t *dev, const char *s)
send -= count;
}
#if LINUX_VERSION_CODE < 0x020315 && !defined(KILLFASYNCHASTHREEPARAMETERS)
/* The extra parameter to kill_fasync was added in 2.3.21, and is
_not_ present in _stock_ 2.2.14 and 2.2.15. However, some
distributions patch 2.2.x kernels to add this parameter. The
Makefile.linux attempts to detect this addition and defines
KILLFASYNCHASTHREEPARAMETERS if three parameters are found. */
if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO);
#else
/* Parameter added in 2.3.21. */
#if LINUX_VERSION_CODE < 0x020400
if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO, POLL_IN);
#else
/* Type of first parameter changed in
Linux 2.4.0-test2... */
if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
#endif
#endif
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
......
......@@ -42,7 +42,7 @@ int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
DRM_MEM_BUFLISTS);
if(!bl->bufs) return -ENOMEM;
memset(bl->bufs, 0, sizeof(*bl->bufs));
bl->count = count;
bl->rp = bl->bufs;
bl->wp = bl->bufs;
......
......@@ -85,12 +85,7 @@ void DRM(mem_init)(void)
}
si_meminfo(&si);
#if LINUX_VERSION_CODE < 0x020317
/* Changed to page count in 2.3.23 */
DRM(ram_available) = si.totalram >> PAGE_SHIFT;
#else
DRM(ram_available) = si.totalram;
#endif
DRM(ram_used) = 0;
}
......@@ -257,12 +252,7 @@ unsigned long DRM(alloc_pages)(int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if LINUX_VERSION_CODE >= 0x020400
/* Argument type changed in 2.4.0-test6/pre8 */
mem_map_reserve(virt_to_page(addr));
#else
mem_map_reserve(MAP_NR(addr));
#endif
}
return address;
......@@ -283,12 +273,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if LINUX_VERSION_CODE >= 0x020400
/* Argument type changed in 2.4.0-test6/pre8 */
mem_map_unreserve(virt_to_page(addr));
#else
mem_map_unreserve(MAP_NR(addr));
#endif
}
free_pages(address, order);
}
......@@ -329,6 +314,29 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
return pt;
}
void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = ioremap_nocache(offset, size))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
}
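/* Editorial sketch (not part of the patch): DRM(ioremap_nocache) mirrors
 * DRM(ioremap) but asks for an uncached mapping, which is what register
 * apertures generally want, and it updates the same DRM_MEM_MAPPINGS counters
 * so the /proc memory statistics stay consistent.  A hypothetical caller using
 * the DRM_IOREMAP_NOCACHE() wrapper from drmP.h:
 */
static int example_map_mmio(drm_map_t *map)
{
	DRM_IOREMAP_NOCACHE(map);	/* fills in map->handle; NULL on failure */
	if (!map->handle)
		return -ENOMEM;
	return 0;
}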
void DRM(ioremapfree)(void *pt, unsigned long size)
{
int alloc_count;
......
......@@ -94,6 +94,8 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
return -ENOMEM;
}
memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
if ( !entry->busaddr ) {
......
......@@ -31,10 +31,6 @@
#define __NO_VERSION__
#include "drmP.h"
#if LINUX_VERSION_CODE < 0x020400
#include "stubsupport-pre24.h"
#endif
#define DRM_STUB_MAXCARDS 16 /* Enough for one machine */
static struct drm_stub_list {
......@@ -70,9 +66,7 @@ static int DRM(stub_open)(struct inode *inode, struct file *filp)
}
static struct file_operations DRM(stub_fops) = {
#if LINUX_VERSION_CODE >= 0x020400
owner: THIS_MODULE,
#endif
open: DRM(stub_open)
};
......
......@@ -56,16 +56,9 @@ struct vm_operations_struct DRM(vm_sg_ops) = {
close: DRM(vm_close),
};
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#endif
int write_access)
{
#if __REALLY_HAVE_AGP
drm_file_t *priv = vma->vm_file->private_data;
......@@ -122,11 +115,7 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
baddr, __va(agpmem->memory->memory[offset]), offset);
#if LINUX_VERSION_CODE < 0x020317
return page_address(page);
#else
return page;
#endif
}
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */
......@@ -134,22 +123,11 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
return NOPAGE_SIGBUS; /* Disallow mremap */
}
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#endif
int write_access)
{
#if LINUX_VERSION_CODE >= 0x020300
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
#else
drm_map_t *map = (drm_map_t *)vma->vm_pte;
#endif
unsigned long offset;
unsigned long i;
struct page *page;
......@@ -165,11 +143,7 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
get_page(page);
DRM_DEBUG("shm_nopage 0x%lx\n", address);
#if LINUX_VERSION_CODE < 0x020317
return page_address(page);
#else
return page;
#endif
}
/* Special close routine which deletes map information if we are the last
......@@ -188,25 +162,14 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_dec(&dev->vma_count);
#if LINUX_VERSION_CODE >= 0x020300
map = vma->vm_private_data;
#else
map = vma->vm_pte;
#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
if (pt->vma->vm_pte == map) found_maps++;
#endif
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
......@@ -259,16 +222,9 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
up(&dev->struct_sem);
}
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#endif
int write_access)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
......@@ -288,30 +244,15 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
get_page(page);
DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
#if LINUX_VERSION_CODE < 0x020317
return page_address(page);
#else
DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
return page;
#endif
}
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused)
#endif
int write_access)
{
#if LINUX_VERSION_CODE >= 0x020300
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
#else
drm_map_t *map = (drm_map_t *)vma->vm_pte;
#endif
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_sg_mem_t *entry = dev->sg;
......@@ -331,11 +272,7 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
page = entry->pagelist[page_offset];
get_page(page);
#if LINUX_VERSION_CODE < 0x020317
return page_address(page);
#else
return page;
#endif
}
void DRM(vm_open)(struct vm_area_struct *vma)
......@@ -347,10 +284,6 @@ void DRM(vm_open)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
#if LINUX_VERSION_CODE < 0x020333
/* The map can exist after the fd is closed. */
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
......@@ -371,9 +304,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_dec(&dev->vma_count);
down(&dev->struct_sem);
......@@ -412,13 +342,13 @@ int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
vma->vm_ops = &DRM(vm_dma_ops);
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
/* In Linux 2.2.3 and above, this is
handled in do_mmap() in mm/mmap.c. */
++filp->f_count;
#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
DRM(vm_open)(vma);
return 0;
......@@ -521,12 +451,12 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
}
offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
if (io_remap_page_range(vma, vma->vm_start,
if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
VM_OFFSET(vma) + offset,
vma->vm_end - vma->vm_start,
vma->vm_page_prot, 0))
#else
if (remap_page_range(vma, vma->vm_start,
if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
VM_OFFSET(vma) + offset,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
......@@ -540,34 +470,33 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
break;
case _DRM_SHM:
vma->vm_ops = &DRM(vm_shm_ops);
#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
#else
vma->vm_pte = (unsigned long)map;
#endif
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED;
#else
vma->vm_flags |= VM_RESERVED;
#endif
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &DRM(vm_sg_ops);
#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED;
#else
vma->vm_pte = (unsigned long)map;
vma->vm_flags |= VM_RESERVED;
#endif
vma->vm_flags |= VM_RESERVED;
break;
default:
return -EINVAL; /* This should never happen. */
}
#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
/* In Linux 2.2.3 and above, this is
handled in do_mmap() in mm/mmap.c. */
++filp->f_count;
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
DRM(vm_open)(vma);
return 0;
......
......@@ -41,6 +41,8 @@
/* DMA customization:
*/
#define __HAVE_DMA 1
#define __HAVE_AGP 1
#define __MUST_HAVE_AGP 0
#define __HAVE_OLD_DMA 1
#define __HAVE_PCI_DMA 1
......@@ -61,33 +63,61 @@
#define __HAVE_DMA_QUIESCENT 1
#define DRIVER_DMA_QUIESCENT() do { \
/* FIXME ! */ \
gamma_dma_quiescent_dual(dev); \
gamma_dma_quiescent_single(dev); \
return 0; \
} while (0)
#define __HAVE_DMA_IRQ 1
#define __HAVE_DMA_IRQ_BH 1
#if 1
#define DRIVER_PREINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 ); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); \
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \
} while (0)
#define DRIVER_POSTINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \
} while (0)
#else
#define DRIVER_POSTINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002000 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000004 ); \
} while (0)
#define DRIVER_PREINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
GAMMA_WRITE( GAMMA_GCOMMANDMODE, GAMMA_QUEUED_DMA_MODE );\
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );\
} while (0)
#endif
#define DRIVER_UNINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3); \
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \
} while (0)
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_gamma_private_t *)((dev)->dev_private))->buffers
#endif /* __GAMMA_H__ */
#ifndef _GAMMA_DRM_H_
#define _GAMMA_DRM_H_
typedef struct _drm_gamma_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_gamma_tex_region_t;
typedef struct {
unsigned int GDeltaMode;
unsigned int GDepthMode;
unsigned int GGeometryMode;
unsigned int GTransformMode;
} drm_gamma_context_regs_t;
typedef struct _drm_gamma_sarea {
drm_gamma_context_regs_t context_state;
unsigned int dirty;
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
* age different to the one you set, then you are mistaken and
* it has been stolen by another client. If global texAge
* hasn't changed, there is no need to walk the list.
*
* These regions can be used as a proxy for the fine-grained
* texture information of other clients - by maintaining them
* in the same lru which is used to age their own textures,
* clients have an approximate lru for the whole of global
* texture space, and can make informed decisions as to which
* areas to kick out. There is no need to choose whether to
* kick out your own texture or someone else's - simply eject
* them all in LRU order.
*/
#define GAMMA_NR_TEX_REGIONS 64
drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
} drm_gamma_sarea_t;
typedef struct drm_gamma_copy {
unsigned int DMAOutputAddress;
unsigned int DMAOutputCount;
unsigned int DMAReadGLINTSource;
unsigned int DMARectangleWriteAddress;
unsigned int DMARectangleWriteLinePitch;
unsigned int DMARectangleWrite;
unsigned int DMARectangleReadAddress;
unsigned int DMARectangleReadLinePitch;
unsigned int DMARectangleRead;
unsigned int DMARectangleReadTarget;
} drm_gamma_copy_t;
typedef struct drm_gamma_init {
enum {
GAMMA_INIT_DMA = 0x01,
GAMMA_CLEANUP_DMA = 0x02
} func;
int sarea_priv_offset;
int pcimode;
unsigned int mmio0;
unsigned int mmio1;
unsigned int mmio2;
unsigned int mmio3;
unsigned int buffers_offset;
} drm_gamma_init_t;
#endif /* _GAMMA_DRM_H_ */
......@@ -38,15 +38,19 @@
#define DRIVER_NAME "gamma"
#define DRIVER_DESC "3DLabs gamma"
#define DRIVER_DATE "20010216"
#define DRIVER_DATE "20010624"
#define DRIVER_MAJOR 1
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 }
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_GAMMA_INIT)] = { gamma_dma_init, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_GAMMA_COPY)] = { gamma_dma_copy, 1, 1 }
#define IOCTL_TABLE_NAME DRM(ioctls)
#define IOCTL_FUNC_NAME DRM(ioctl)
#define __HAVE_COUNTERS 5
#define __HAVE_COUNTER6 _DRM_STAT_IRQ
......@@ -57,6 +61,7 @@
#include "drm_auth.h"
#include "drm_agpsupport.h"
#include "drm_bufs.h"
#include "drm_context.h"
#include "drm_dma.h"
......@@ -82,7 +87,6 @@ static int __init gamma_options( char *str )
__setup( DRIVER_NAME "=", gamma_options );
#endif
#include "drm_fops.h"
#include "drm_init.h"
#include "drm_ioctl.h"
......
......@@ -32,8 +32,9 @@
#ifndef _GAMMA_DRV_H_
#define _GAMMA_DRV_H_
typedef struct drm_gamma_private {
drm_gamma_sarea_t *sarea_priv;
drm_map_t *sarea;
drm_map_t *buffers;
drm_map_t *mmio0;
drm_map_t *mmio1;
......@@ -51,6 +52,11 @@ do { \
} \
} while (0)
/* gamma_dma.c */
extern int gamma_dma_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int gamma_dma_copy( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern void gamma_dma_ready(drm_device_t *dev);
extern void gamma_dma_quiescent_single(drm_device_t *dev);
......@@ -63,6 +69,7 @@ extern int gamma_dma(struct inode *inode, struct file *filp,
extern int gamma_find_devices(void);
extern int gamma_found(void);
#define GLINT_DRI_BUF_COUNT 256
#define GAMMA_OFF(reg) \
((reg < 0x1000) \
......@@ -78,7 +85,6 @@ extern int gamma_found(void);
((reg < 0x10000) ? dev_priv->mmio1->handle : \
((reg < 0x11000) ? dev_priv->mmio2->handle : \
dev_priv->mmio3->handle))))
#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
#define GAMMA_READ(reg) GAMMA_DEREF(reg)
......@@ -91,9 +97,11 @@ extern int gamma_found(void);
#define GAMMA_FILTERMODE 0x8c00
#define GAMMA_GCOMMANDINTFLAGS 0x0c50
#define GAMMA_GCOMMANDMODE 0x0c40
#define GAMMA_QUEUED_DMA_MODE 1<<1
#define GAMMA_GCOMMANDSTATUS 0x0c60
#define GAMMA_GDELAYTIMER 0x0c38
#define GAMMA_GDMACONTROL 0x0060
#define GAMMA_USE_AGP 1<<1
#define GAMMA_GINTENABLE 0x0808
#define GAMMA_GINTFLAGS 0x0810
#define GAMMA_INFIFOSPACE 0x0018
......@@ -101,5 +109,12 @@ extern int gamma_found(void);
#define GAMMA_OUTPUTFIFO 0x2000
#define GAMMA_SYNC 0x8c40
#define GAMMA_SYNC_TAG 0x0188
#define GAMMA_PAGETABLEADDR 0x0C00
#define GAMMA_PAGETABLELENGTH 0x0C08
#define GAMMA_PASSTHROUGH 0x1FE
#define GAMMA_DMAADDRTAG 0x530
#define GAMMA_DMACOUNTTAG 0x531
#define GAMMA_COMMANDINTTAG 0x532
#endif
......@@ -60,50 +60,10 @@
i810_dma_quiescent( dev ); \
} while (0)
#define __HAVE_DMA_IRQ 1
#define __HAVE_DMA_IRQ_BH 1
#define __HAVE_SHARED_IRQ 1
#define DRIVER_PREINSTALL() do { \
drm_i810_private_t *dev_priv = \
(drm_i810_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I810_READ16( I810REG_HWSTAM ); \
tmp = tmp & 0x6000; \
I810_WRITE16( I810REG_HWSTAM, tmp ); \
\
tmp = I810_READ16( I810REG_INT_MASK_R ); \
tmp = tmp & 0x6000; /* Unmask interrupts */ \
I810_WRITE16( I810REG_INT_MASK_R, tmp ); \
tmp = I810_READ16( I810REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; /* Disable all interrupts */ \
I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \
} while (0)
#define DRIVER_POSTINSTALL() do { \
drm_i810_private_t *dev_priv = \
(drm_i810_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I810_READ16( I810REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; \
tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \
I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \
} while (0)
#define DRIVER_UNINSTALL() do { \
drm_i810_private_t *dev_priv = \
(drm_i810_private_t *)dev->dev_private; \
u16 tmp; \
if ( dev_priv ) { \
tmp = I810_READ16( I810REG_INT_IDENTITY_R ); \
tmp = tmp & ~(0x6000); /* Clear all interrupts */ \
if ( tmp != 0 ) \
I810_WRITE16( I810REG_INT_IDENTITY_R, tmp ); \
\
tmp = I810_READ16( I810REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; /* Disable all interrupts */ \
I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \
} \
} while (0)
/* Don't need an irq any more. The template code will make sure that
* a noop stub is generated for compatibility.
*/
#define __HAVE_DMA_IRQ 0
/* Buffer customization:
*/
......
......@@ -88,6 +88,8 @@
#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
#define I810_TEX_SETUP_SIZE 8
/* Flags for clear ioctl
*/
#define I810_FRONT 0x1
#define I810_BACK 0x2
#define I810_DEPTH 0x4
......@@ -112,6 +114,8 @@ typedef struct _drm_i810_init {
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int overlay_offset;
unsigned int overlay_physical;
unsigned int w;
unsigned int h;
unsigned int pitch;
......@@ -189,6 +193,17 @@ typedef struct _drm_i810_copy_t {
void *address; /* Address to copy from */
} drm_i810_copy_t;
#define PR_TRIANGLES (0x0<<18)
#define PR_TRISTRIP_0 (0x1<<18)
#define PR_TRISTRIP_1 (0x2<<18)
#define PR_TRIFAN (0x3<<18)
#define PR_POLYGON (0x4<<18)
#define PR_LINES (0x5<<18)
#define PR_LINESTRIP (0x6<<18)
#define PR_RECTS (0x7<<18)
#define PR_MASK (0x7<<18)
typedef struct drm_i810_dma {
void *virtual;
int request_idx;
......@@ -196,4 +211,18 @@ typedef struct drm_i810_dma {
int granted;
} drm_i810_dma_t;
typedef struct _drm_i810_overlay_t {
unsigned int offset; /* Address of the Overlay Regs */
unsigned int physical;
} drm_i810_overlay_t;
typedef struct _drm_i810_mc {
int idx; /* buffer index */
int used; /* nr bytes in use */
int num_blocks; /* number of GFXBlocks */
int *length; /* List of lengths for GFXBlocks (FUTURE)*/
unsigned int last_render; /* Last Render Request */
} drm_i810_mc_t;
#endif /* _I810_DRM_H_ */
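/* Editorial sketch (not part of the patch): the new overlay/XvMC ioctls above
 * are reachable from user space through the DRM device node.  A minimal probe
 * of DRM_IOCTL_I810_OV0INFO; the device path and the absence of DRM
 * authentication handling are simplifying assumptions:
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "drm.h"	/* pulls in i810_drm.h and the ioctl numbers */

int main(void)
{
	drm_i810_overlay_t ov;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I810_OV0INFO, &ov) < 0) {
		perror("i810 overlay info");
		return 1;
	}
	printf("overlay regs: offset 0x%x, physical 0x%x\n", ov.offset, ov.physical);
	close(fd);
	return 0;
}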
......@@ -39,11 +39,19 @@
#define DRIVER_NAME "i810"
#define DRIVER_DESC "Intel i810"
#define DRIVER_DATE "20010616"
#define DRIVER_DATE "20020211"
/* Interface history
*
* 1.1 - XFree86 4.1
* 1.2 - XvMC interfaces
* - XFree86 4.2
* 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
* - Remove requirement for interrupt (leave stubs again)
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 1
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \
......@@ -54,7 +62,12 @@
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }
#define __HAVE_COUNTERS 4
......
......@@ -64,8 +64,6 @@ typedef struct drm_i810_private {
unsigned long hw_status_page;
unsigned long counter;
atomic_t flush_done;
wait_queue_head_t flush_queue; /* Processes waiting until flush */
drm_buf_t *mmap_buffer;
......@@ -73,8 +71,11 @@ typedef struct drm_i810_private {
int back_offset;
int depth_offset;
int overlay_offset;
int overlay_physical;
int w, h;
int pitch;
} drm_i810_private_t;
/* i810_dma.c */
......@@ -89,15 +90,29 @@ extern void i810_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int i810_getage(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
/* Obsolete:
*/
extern int i810_copybuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Obsolete:
*/
extern int i810_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i810_dma_quiescent(drm_device_t *dev);
extern int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
#define I810_VERBOSE 0
extern void i810_dma_quiescent(drm_device_t *dev);
int i810_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
......
/* i830.h -- Intel I830 DRM template customization -*- linux-c -*-
* Created: Thu Feb 15 00:01:12 2001 by gareth@valinux.com
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#ifndef __I830_H__
#define __I830_H__
/* This remains constant for all DRM template files.
*/
#define DRM(x) i830_##x
/* General customization:
*/
#define __HAVE_AGP 1
#define __MUST_HAVE_AGP 1
#define __HAVE_MTRR 1
#define __HAVE_CTX_BITMAP 1
/* Driver customization:
*/
#define __HAVE_RELEASE 1
#define DRIVER_RELEASE() do { \
i830_reclaim_buffers( dev, priv->pid ); \
} while (0)
/* DMA customization:
*/
#define __HAVE_DMA 1
#define __HAVE_DMA_QUEUE 1
#define __HAVE_DMA_WAITLIST 1
#define __HAVE_DMA_RECLAIM 1
#define __HAVE_DMA_QUIESCENT 1
#define DRIVER_DMA_QUIESCENT() do { \
i830_dma_quiescent( dev ); \
} while (0)
#define __HAVE_DMA_IRQ 1
#define __HAVE_DMA_IRQ_BH 1
#define __HAVE_SHARED_IRQ 1
#define DRIVER_PREINSTALL() do { \
drm_i830_private_t *dev_priv = \
(drm_i830_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I830_READ16( I830REG_HWSTAM ); \
tmp = tmp & 0x6000; \
I830_WRITE16( I830REG_HWSTAM, tmp ); \
\
tmp = I830_READ16( I830REG_INT_MASK_R ); \
tmp = tmp & 0x6000; /* Unmask interrupts */ \
I830_WRITE16( I830REG_INT_MASK_R, tmp ); \
tmp = I830_READ16( I830REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; /* Disable all interrupts */ \
I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \
} while (0)
#define DRIVER_POSTINSTALL() do { \
drm_i830_private_t *dev_priv = \
(drm_i830_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I830_READ16( I830REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; \
tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \
I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \
} while (0)
#define DRIVER_UNINSTALL() do { \
drm_i830_private_t *dev_priv = \
(drm_i830_private_t *)dev->dev_private; \
u16 tmp; \
if ( dev_priv ) { \
tmp = I830_READ16( I830REG_INT_IDENTITY_R ); \
tmp = tmp & ~(0x6000); /* Clear all interrupts */ \
if ( tmp != 0 ) \
I830_WRITE16( I830REG_INT_IDENTITY_R, tmp ); \
\
tmp = I830_READ16( I830REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; /* Disable all interrupts */ \
I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \
} \
} while (0)
/* Buffer customization:
*/
#define DRIVER_BUF_PRIV_T drm_i830_buf_priv_t
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_i830_private_t *)((dev)->dev_private))->buffer_map
#endif
#ifndef _I830_DRM_H_
#define _I830_DRM_H_
/* WARNING: These defines must be the same as what the Xserver uses.
* If you change them, you must change the defines in the Xserver.
*/
#ifndef _I830_DEFINES_
#define _I830_DEFINES_
#define I830_DMA_BUF_ORDER 12
#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
#define I830_DMA_BUF_NR 256
#define I830_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define I830_NR_TEX_REGIONS 64
#define I830_LOG_MIN_TEX_REGION_SIZE 16
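/* Editorial note (not part of the patch): these constants size the i830 DMA
 * buffer pool at 256 buffers of 1 << 12 = 4096 bytes each (1 MiB in total)
 * and carve texture space into at most 64 regions of at least 1 << 16 = 64 KiB.
 * A compile-time restatement; the two macro names are illustrative only:
 */
#define I830_EXAMPLE_DMA_POOL_BYTES	(I830_DMA_BUF_NR * I830_DMA_BUF_SZ)	/* 256 * 4096 = 1 MiB */
#define I830_EXAMPLE_MIN_TEX_REGION	(1 << I830_LOG_MIN_TEX_REGION_SIZE)	/* 65536 = 64 KiB */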
/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */
#if !defined(I830_ENABLE_4_TEXTURES)
#define I830_TEXTURE_COUNT 2
#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */
#else /* defined(I830_ENABLE_4_TEXTURES) */
#define I830_TEXTURE_COUNT 4
#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */
#endif /* I830_ENABLE_4_TEXTURES */
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
#define I830_UPLOAD_CTX 0x1
#define I830_UPLOAD_BUFFERS 0x2
#define I830_UPLOAD_CLIPRECTS 0x4
#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
#define I830_UPLOAD_TEX0 0x10000
#define I830_UPLOAD_TEX1 0x20000
#define I830_UPLOAD_TEX2 0x40000
#define I830_UPLOAD_TEX3 0x80000
#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
#define I830_UPLOAD_TEX_MASK 0xf0000
#define I830_UPLOAD_TEXBLEND0 0x100000
#define I830_UPLOAD_TEXBLEND1 0x200000
#define I830_UPLOAD_TEXBLEND2 0x400000
#define I830_UPLOAD_TEXBLEND3 0x800000
#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
/* Destbuffer state
* - backbuffer linear offset and pitch -- invariant in the current dri
* - zbuffer linear offset and pitch -- also invariant
* - drawing origin in back and depth buffers.
*
* Keep the depth/back buffer state here to accommodate private buffers
* in the future.
*/
#define I830_DESTREG_CBUFADDR 0
/* Invariant */
#define I830_DESTREG_DBUFADDR 1
#define I830_DESTREG_DV0 2
#define I830_DESTREG_DV1 3
#define I830_DESTREG_SENABLE 4
#define I830_DESTREG_SR0 5
#define I830_DESTREG_SR1 6
#define I830_DESTREG_SR2 7
#define I830_DESTREG_DR0 8
#define I830_DESTREG_DR1 9
#define I830_DESTREG_DR2 10
#define I830_DESTREG_DR3 11
#define I830_DESTREG_DR4 12
#define I830_DEST_SETUP_SIZE 13
/* Context state
*/
#define I830_CTXREG_STATE1 0
#define I830_CTXREG_STATE2 1
#define I830_CTXREG_STATE3 2
#define I830_CTXREG_STATE4 3
#define I830_CTXREG_STATE5 4
#define I830_CTXREG_IALPHAB 5
#define I830_CTXREG_STENCILTST 6
#define I830_CTXREG_ENABLES_1 7
#define I830_CTXREG_ENABLES_2 8
#define I830_CTXREG_AA 9
#define I830_CTXREG_FOGCOLOR 10
#define I830_CTXREG_BLENDCOLR0 11
#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
#define I830_CTXREG_VF 13
#define I830_CTXREG_VF2 14
#define I830_CTXREG_MCSB0 15
#define I830_CTXREG_MCSB1 16
#define I830_CTX_SETUP_SIZE 17
/* Texture state (per tex unit)
*/
#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
#define I830_TEXREG_MI1 1
#define I830_TEXREG_MI2 2
#define I830_TEXREG_MI3 3
#define I830_TEXREG_MI4 4
#define I830_TEXREG_MI5 5
#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
#define I830_TEX_SETUP_SIZE 10
#define I830_FRONT 0x1
#define I830_BACK 0x2
#define I830_DEPTH 0x4
#endif /* _I830_DEFINES_ */
typedef struct _drm_i830_init {
enum {
I830_INIT_DMA = 0x01,
I830_CLEANUP_DMA = 0x02
} func;
unsigned int mmio_offset;
unsigned int buffers_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
} drm_i830_init_t;
/* Warning: If you change the SAREA structure you must change the Xserver
* structure as well */
typedef struct _drm_i830_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_i830_tex_region_t;
typedef struct _drm_i830_sarea {
unsigned int ContextState[I830_CTX_SETUP_SIZE];
unsigned int BufferState[I830_DEST_SETUP_SIZE];
unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
unsigned int Palette[2][256];
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
* age different to the one you set, then you are mistaken and
* it has been stolen by another client. If global texAge
* hasn't changed, there is no need to walk the list.
*
* These regions can be used as a proxy for the fine-grained
* texture information of other clients - by maintaining them
* in the same lru which is used to age their own textures,
* clients have an approximate lru for the whole of global
* texture space, and can make informed decisions as to which
* areas to kick out. There is no need to choose whether to
* kick out your own texture or someone else's - simply eject
* them all in LRU order.
*/
drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS+1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
} drm_i830_sarea_t;
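/* Editorial sketch (not part of the patch): the texList/texAge comment above
 * describes the shared texture-LRU protocol.  A hypothetical client-side
 * check (helper name and the my_age bookkeeping are illustrative) compares
 * the age the client last stamped into a region with what the SAREA holds:
 */
static int example_i830_region_still_mine(const drm_i830_sarea_t *sarea,
					  int region, int my_age)
{
	/* another client bumps the age when it steals the region, so a
	 * mismatch means the texture there has to be re-uploaded */
	return sarea->texList[region].age == my_age;
}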
typedef struct _drm_i830_clear {
int clear_color;
int clear_depth;
int flags;
unsigned int clear_colormask;
unsigned int clear_depthmask;
} drm_i830_clear_t;
/* These may be placeholders if we have more cliprects than
* I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
* false, indicating that the buffer will be dispatched again with a
* new set of cliprects.
*/
typedef struct _drm_i830_vertex {
int idx; /* buffer index */
int used; /* nr bytes in use */
int discard; /* client is finished with the buffer? */
} drm_i830_vertex_t;
typedef struct _drm_i830_copy_t {
int idx; /* buffer index */
int used; /* nr bytes in use */
void *address; /* Address to copy from */
} drm_i830_copy_t;
typedef struct drm_i830_dma {
void *virtual;
int request_idx;
int request_size;
int granted;
} drm_i830_dma_t;
#endif /* _I830_DRM_H_ */
/* i830_drv.c -- I830 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Abraham vd Merwe <abraham@2d3d.co.za>
*/
#include <linux/config.h>
#include "i830.h"
#include "drmP.h"
#include "i830_drv.h"
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "i830"
#define DRIVER_DESC "Intel 830M"
#define DRIVER_DATE "20011004"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 },
#define __HAVE_COUNTERS 4
#define __HAVE_COUNTER6 _DRM_STAT_IRQ
#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
#define __HAVE_COUNTER9 _DRM_STAT_DMA
#include "drm_agpsupport.h"
#include "drm_auth.h"
#include "drm_bufs.h"
#include "drm_context.h"
#include "drm_dma.h"
#include "drm_drawable.h"
#include "drm_drv.h"
#ifndef MODULE
/* DRM(options) is called by the kernel to parse command-line options
* passed via the boot-loader (e.g., LILO). It calls the insmod option
* routine, drm_parse_drm.
*/
/* JH- We have to hand expand the string ourselves because of the cpp. If
* anyone can think of a way that we can fit into the __setup macro without
* changing it, then please send the solution my way.
*/
static int __init i830_options( char *str )
{
DRM(parse_options)( str );
return 1;
}
__setup( DRIVER_NAME "=", i830_options );
#endif
#include "drm_fops.h"
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_lock.h"
#include "drm_lists.h"
#include "drm_memory.h"
#include "drm_proc.h"
#include "drm_vm.h"
#include "drm_stub.h"
/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#ifndef _I830_DRV_H_
#define _I830_DRV_H_
typedef struct drm_i830_buf_priv {
u32 *in_use;
int my_use_idx;
int currently_mapped;
void *virtual;
void *kernel_virtual;
int map_count;
struct vm_area_struct *vma;
} drm_i830_buf_priv_t;
typedef struct _drm_i830_ring_buffer{
int tail_mask;
unsigned long Start;
unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
} drm_i830_ring_buffer_t;
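/* A minimal sketch (hypothetical helper, not part of this header) of how the
 * head/tail/space fields above are normally kept consistent: space is the
 * distance from tail to head, wrapped around Size, minus a small safety gap.
 */
static inline void i830_sketch_update_ring_space(drm_i830_ring_buffer_t *ring)
{
	ring->space = ring->head - (ring->tail + 8);	/* leave an 8-byte gap */
	if (ring->space < 0)
		ring->space += ring->Size;
}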
typedef struct drm_i830_private {
drm_map_t *sarea_map;
drm_map_t *buffer_map;
drm_map_t *mmio_map;
drm_i830_sarea_t *sarea_priv;
drm_i830_ring_buffer_t ring;
unsigned long hw_status_page;
unsigned long counter;
atomic_t flush_done;
wait_queue_head_t flush_queue; /* Processes waiting until flush */
drm_buf_t *mmap_buffer;
u32 front_di1, back_di1, zi1;
int back_offset;
int depth_offset;
int w, h;
int pitch;
int back_pitch;
int depth_pitch;
unsigned int cpp;
} drm_i830_private_t;
/* i830_dma.c */
extern int i830_dma_schedule(drm_device_t *dev, int locked);
extern int i830_getbuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i830_reclaim_buffers(drm_device_t *dev, pid_t pid);
extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
extern int i830_copybuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i830_dma_quiescent(drm_device_t *dev);
extern int i830_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_swap_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
#define I830_VERBOSE 0
#define I830_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
#define I830_ADDR(reg) (I830_BASE(reg) + reg)
#define I830_DEREF(reg) *(__volatile__ int *)I830_ADDR(reg)
#define I830_READ(reg) I830_DEREF(reg)
#define I830_WRITE(reg,val) do { I830_DEREF(reg) = val; } while (0)
#define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg)
#define I830_READ16(reg) I830_DEREF16(reg)
#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0)
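/* How these expand (illustration only, assuming a dev_priv of type
 * drm_i830_private_t in scope, which is what the macros require):
 *
 *   I830_READ(reg)
 *     -> *(__volatile__ int *)((unsigned long)dev_priv->mmio_map->handle + reg)
 *
 * i.e. every register offset is dereferenced relative to the MMIO mapping
 * recorded in dev_priv->mmio_map.
 */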
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
#define INST_PARSER_CLIENT 0x00000000
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x00FFFFF8
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x000FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
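/* Sketch of typical use of the ring registers above (not code from this
 * patch): the hardware read pointer of the low-priority ring is obtained as
 *
 *     head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
 *
 * with HEAD_WRAP_COUNT masking off the wrap counter kept in the same
 * register.
 */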
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define BR00_BITBLT_CLIENT 0x40000000
#define BR00_OP_COLOR_BLT 0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN 0x80000000
#define BUF_3D_ID_COLOR_BACK (0x3<<24)
#define BUF_3D_ID_DEPTH (0x7<<24)
#define BUF_3D_USE_FENCE (1<<23)
#define BUF_3D_PITCH(x) (((x)/4)<<2)
#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
#define MAP_PALETTE_BOTH (1<<11)
#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
#define XY_COLOR_BLT_WRITE_RGB (1<<20)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_NON_SECURE (1)
#endif
......@@ -163,6 +163,9 @@ static inline void mga_g400_emit_tex0( drm_mga_private_t *dev_priv )
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
DMA_LOCALS;
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
BEGIN_DMA( 6 );
DMA_BLOCK( MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
......@@ -204,6 +207,9 @@ static inline void mga_g400_emit_tex1( drm_mga_private_t *dev_priv )
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
DMA_LOCALS;
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
BEGIN_DMA( 5 );
DMA_BLOCK( MGA_TEXCTL2, (tex->texctl2 |
......@@ -272,6 +278,8 @@ static inline void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
unsigned int pipe = sarea_priv->warp_pipe;
DMA_LOCALS;
/* printk("mga_g400_emit_pipe %x\n", pipe); */
BEGIN_DMA( 10 );
DMA_BLOCK( MGA_WIADDR2, MGA_WMODE_SUSPEND,
......
......@@ -39,11 +39,11 @@
#define DRIVER_NAME "r128"
#define DRIVER_DESC "ATI Rage 128"
#define DRIVER_DATE "20010405"
#define DRIVER_DATE "20010917"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 6
#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \
......
......@@ -1519,10 +1519,75 @@ int r128_cce_indirect( struct inode *inode, struct file *filp,
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_indirect_t indirect;
#if 0
RING_LOCALS;
#endif
LOCK_TEST_WITH_RETURN( dev );
/* Indirect buffer firing is not supported at this time. */
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return -EINVAL;
}
if ( copy_from_user( &indirect, (drm_r128_indirect_t *)arg,
sizeof(indirect) ) )
return -EFAULT;
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
indirect.idx, indirect.start,
indirect.end, indirect.discard );
if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
DRM_ERROR( "buffer index %d (of %d max)\n",
indirect.idx, dma->buf_count - 1 );
return -EINVAL;
}
buf = dma->buflist[indirect.idx];
buf_priv = buf->dev_private;
if ( buf->pid != current->pid ) {
DRM_ERROR( "process %d using buffer owned by %d\n",
current->pid, buf->pid );
return -EINVAL;
}
if ( buf->pending ) {
DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
return -EINVAL;
}
if ( indirect.start < buf->used ) {
DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
indirect.start, buf->used );
return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN( dev_priv );
VB_AGE_TEST_WITH_RETURN( dev_priv );
buf->used = indirect.end;
buf_priv->discard = indirect.discard;
#if 0
/* Wait for the 3D stream to idle before the indirect buffer
* containing 2D acceleration commands is processed.
*/
return -EINVAL;
BEGIN_RING( 2 );
RADEON_WAIT_UNTIL_3D_IDLE();
ADVANCE_RING();
#endif
/* Dispatch the indirect buffer full of commands from the
* X server. This is insecure and is thus only available to
* privileged clients.
*/
r128_cce_dispatch_indirect( dev, buf, indirect.start, indirect.end );
return 0;
}
......@@ -744,17 +744,17 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
* and screwing with the clear operation.
*/
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
RADEON_Z_ENABLE |
(dev_priv->color_fmt << 10) |
RADEON_ZBLOCK16);
(1<<15));
dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt |
RADEON_Z_TEST_ALWAYS |
RADEON_STENCIL_TEST_ALWAYS |
RADEON_STENCIL_S_FAIL_KEEP |
RADEON_STENCIL_ZPASS_KEEP |
RADEON_STENCIL_ZFAIL_KEEP |
RADEON_Z_WRITE_ENABLE);
dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt |
RADEON_Z_TEST_ALWAYS |
RADEON_STENCIL_TEST_ALWAYS |
RADEON_STENCIL_S_FAIL_REPLACE |
RADEON_STENCIL_ZPASS_REPLACE |
RADEON_STENCIL_ZFAIL_REPLACE |
RADEON_Z_WRITE_ENABLE);
dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
RADEON_BFACE_SOLID |
......@@ -964,9 +964,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
radeon_cp_load_microcode( dev_priv );
radeon_cp_init_ring_buffer( dev, dev_priv );
#if ROTATE_BUFS
dev_priv->last_buf = 0;
#endif
dev->dev_private = (void *)dev_priv;
......@@ -1146,116 +1144,27 @@ int radeon_engine_reset( struct inode *inode, struct file *filp,
* Fullscreen mode
*/
static int radeon_do_init_pageflip( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "%s\n", __FUNCTION__ );
dev_priv->crtc_offset = RADEON_READ( RADEON_CRTC_OFFSET );
dev_priv->crtc_offset_cntl = RADEON_READ( RADEON_CRTC_OFFSET_CNTL );
RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->front_offset );
RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL,
dev_priv->crtc_offset_cntl |
RADEON_CRTC_OFFSET_FLIP_CNTL );
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
return 0;
}
int radeon_do_cleanup_pageflip( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "%s\n", __FUNCTION__ );
RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->crtc_offset );
RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl );
dev_priv->page_flipping = 0;
dev_priv->current_page = 0;
return 0;
}
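/* Note on the two helpers above (descriptive only): init_pageflip saves the
 * CRTC offset registers and enables RADEON_CRTC_OFFSET_FLIP_CNTL, presumably
 * so that later swaps can be done by repointing RADEON_CRTC_OFFSET at the
 * front or back buffer; cleanup_pageflip restores the saved values and
 * clears the page_flipping state.
 */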
/* KW: Deprecated to say the least:
*/
int radeon_fullscreen( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_fullscreen_t fs;
LOCK_TEST_WITH_RETURN( dev );
if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg,
sizeof(fs) ) )
return -EFAULT;
switch ( fs.func ) {
case RADEON_INIT_FULLSCREEN:
return radeon_do_init_pageflip( dev );
case RADEON_CLEANUP_FULLSCREEN:
return radeon_do_cleanup_pageflip( dev );
}
return -EINVAL;
return 0;
}
/* ================================================================
* Freelist management
*/
#define RADEON_BUFFER_USED 0xffffffff
#define RADEON_BUFFER_FREE 0
#if 0
static int radeon_freelist_init( drm_device_t *dev )
{
drm_device_dma_t *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_buf_t *buf;
drm_radeon_buf_priv_t *buf_priv;
drm_radeon_freelist_t *entry;
int i;
dev_priv->head = DRM(alloc)( sizeof(drm_radeon_freelist_t),
DRM_MEM_DRIVER );
if ( dev_priv->head == NULL )
return -ENOMEM;
memset( dev_priv->head, 0, sizeof(drm_radeon_freelist_t) );
dev_priv->head->age = RADEON_BUFFER_USED;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
entry = DRM(alloc)( sizeof(drm_radeon_freelist_t),
DRM_MEM_DRIVER );
if ( !entry ) return -ENOMEM;
entry->age = RADEON_BUFFER_FREE;
entry->buf = buf;
entry->prev = dev_priv->head;
entry->next = dev_priv->head->next;
if ( !entry->next )
dev_priv->tail = entry;
buf_priv->discard = 0;
buf_priv->dispatched = 0;
buf_priv->list_entry = entry;
dev_priv->head->next = entry;
if ( dev_priv->head->next )
dev_priv->head->next->prev = entry;
}
return 0;
}
#endif
/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
* bufs until freelist code is used. Note this hides a problem with
* the scratch register (used to keep track of the last buffer
* completed) being written to before the last buffer has actually
* completed rendering.
*
* KW: It's also a good way to find free buffers quickly.
*/
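/* In code terms (a condensed sketch of the loop below, not new logic): the
 * driver reads the last-dispatched age from the hardware and treats any
 * pending buffer whose age has been passed as reusable,
 *
 *     done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG );
 *     if ( buf->pending && buf_priv->age <= done_age )
 *             buf->pending = 0;       -- safe to hand out again
 *
 * and rotating the start index just spreads that search across the list.
 */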
drm_buf_t *radeon_freelist_get( drm_device_t *dev )
{
......@@ -1264,57 +1173,24 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
drm_radeon_buf_priv_t *buf_priv;
drm_buf_t *buf;
int i, t;
#if ROTATE_BUFS
int start;
#endif
/* FIXME: Optimize -- use freelist code */
for ( i = 0 ; i < dma->buf_count ; i++ ) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if ( buf->pid == 0 ) {
DRM_DEBUG( " ret buf=%d last=%d pid=0\n",
buf->idx, dev_priv->last_buf );
return buf;
}
DRM_DEBUG( " skipping buf=%d pid=%d\n",
buf->idx, buf->pid );
}
#if ROTATE_BUFS
if ( ++dev_priv->last_buf >= dma->buf_count )
dev_priv->last_buf = 0;
start = dev_priv->last_buf;
#endif
for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) {
#if 0
/* FIXME: Disable this for now */
u32 done_age = dev_priv->scratch[RADEON_LAST_DISPATCH];
#else
u32 done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG );
#endif
#if ROTATE_BUFS
for ( i = start ; i < dma->buf_count ; i++ ) {
#else
for ( i = 0 ; i < dma->buf_count ; i++ ) {
#endif
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if ( buf->pending && buf_priv->age <= done_age ) {
/* The buffer has been processed, so it
* can now be used.
*/
if ( buf->pid == 0 || (buf->pending &&
buf_priv->age <= done_age) ) {
buf->pending = 0;
DRM_DEBUG( " ret buf=%d last=%d age=%d done=%d\n", buf->idx, dev_priv->last_buf, buf_priv->age, done_age );
return buf;
}
DRM_DEBUG( " skipping buf=%d age=%d done=%d\n",
buf->idx, buf_priv->age,
done_age );
#if ROTATE_BUFS
start = 0;
#endif
}
udelay( 1 );
}
......@@ -1326,14 +1202,10 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
void radeon_freelist_reset( drm_device_t *dev )
{
drm_device_dma_t *dma = dev->dma;
#if ROTATE_BUFS
drm_radeon_private_t *dev_priv = dev->dev_private;
#endif
int i;
#if ROTATE_BUFS
dev_priv->last_buf = 0;
#endif
for ( i = 0 ; i < dma->buf_count ; i++ ) {
drm_buf_t *buf = dma->buflist[i];
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
......