Commit 3cb904ca authored by Linus Torvalds

Merge refs/heads/drm-latest from master.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6

parents d8971fcb 7a9aff3c
@@ -23,13 +23,6 @@ config DRM_TDFX
           Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
           graphics card.  If M is selected, the module will be called tdfx.
 
-config DRM_GAMMA
-        tristate "3dlabs GMX 2000"
-        depends on DRM && BROKEN
-        help
-          This is the old gamma driver, please tell me if it might actually
-          work.
-
 config DRM_R128
         tristate "ATI Rage 128"
         depends on DRM && PCI
@@ -82,7 +75,7 @@ endchoice
 
 config DRM_MGA
         tristate "Matrox g200/g400"
-        depends on DRM && AGP
+        depends on DRM
         help
           Choose this option if you have a Matrox G200, G400 or G450 graphics
           card.  If M is selected, the module will be called mga.  AGP
@@ -103,3 +96,10 @@ config DRM_VIA
           Choose this option if you have a Via unichrome or compatible video
           chipset. If M is selected the module will be called via.
 
+config DRM_SAVAGE
+        tristate "Savage video cards"
+        depends on DRM
+        help
+          Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+          chipset. If M is selected the module will be called savage.
@@ -8,16 +8,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
                 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
                 drm_sysfs.o
 
-gamma-objs := gamma_drv.o gamma_dma.o
 tdfx-objs := tdfx_drv.o
 r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs := i810_drv.o i810_dma.o
 i830-objs := i830_drv.o i830_dma.o i830_irq.o
 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
-radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 ffb-objs := ffb_drv.o ffb_context.o
 sis-objs := sis_drv.o sis_ds.o sis_mm.o
+savage-objs := savage_drv.o savage_bci.o savage_state.o
 via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
 
 ifeq ($(CONFIG_COMPAT),y)
@@ -29,7 +29,6 @@ i915-objs += i915_ioc32.o
 endif
 
 obj-$(CONFIG_DRM)       += drm.o
-obj-$(CONFIG_DRM_GAMMA) += gamma.o
 obj-$(CONFIG_DRM_TDFX)  += tdfx.o
 obj-$(CONFIG_DRM_R128)  += r128.o
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
@@ -39,5 +38,7 @@ obj-$(CONFIG_DRM_I830) += i830.o
 obj-$(CONFIG_DRM_I915)  += i915.o
 obj-$(CONFIG_DRM_FFB)   += ffb.o
 obj-$(CONFIG_DRM_SIS)   += sis.o
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
 obj-$(CONFIG_DRM_VIA)   +=via.o
@@ -98,7 +98,7 @@
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
-typedef unsigned long drm_handle_t;
+typedef unsigned int drm_handle_t;
 typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
@@ -209,7 +209,8 @@ typedef enum drm_map_type {
         _DRM_REGISTERS = 1,       /**< no caching, no core dump */
         _DRM_SHM = 2,             /**< shared, cached */
         _DRM_AGP = 3,             /**< AGP/GART */
-        _DRM_SCATTER_GATHER = 4   /**< Scatter/gather memory for PCI DMA */
+        _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+        _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
 } drm_map_type_t;
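
The new _DRM_CONSISTENT type is backed by PCI-consistent memory; the _DRM_CONSISTENT case added to drm_addmap() further down allocates it with drm_pci_alloc(). As a rough sketch only (hypothetical driver code, not part of this commit), a driver could create such a map through the new in-kernel drm_addmap() entry point whose prototype appears later in this diff:

    /* Sketch: create a 16 KiB consistent-memory map from inside a
     * hypothetical driver.  The core allocates the backing memory and
     * fills in the bus address, so the offset passed in is ignored. */
    static int mydrv_create_dma_map(drm_device_t *dev, drm_local_map_t **map)
    {
            return drm_addmap(dev, 0 /* offset chosen by the core */,
                              0x4000, _DRM_CONSISTENT, _DRM_REMOVABLE, map);
    }
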
@@ -368,7 +369,8 @@ typedef struct drm_buf_desc {
         enum {
                 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
                 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
-                _DRM_SG_BUFFER  = 0x04  /**< Scatter/gather memory buffer */
+                _DRM_SG_BUFFER  = 0x04, /**< Scatter/gather memory buffer */
+                _DRM_FB_BUFFER  = 0x08  /**< Buffer is in frame buffer */
         } flags;
         unsigned long agp_start; /**<
                                   * Start address of where the AGP buffers are
 
...
@@ -53,7 +53,6 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/jiffies.h>
 #include <linux/smp_lock.h>     /* For (un)lock_kernel */
 #include <linux/mm.h>
@@ -96,6 +95,7 @@
 #define DRIVER_IRQ_SHARED  0x80
 #define DRIVER_IRQ_VBL     0x100
 #define DRIVER_DMA_QUEUE   0x200
+#define DRIVER_FB_DMA      0x400
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -160,36 +160,7 @@
 #define pte_unmap(pte)
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
-static inline struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-        unsigned long addr = (unsigned long) vmalloc_addr;
-        struct page *page = NULL;
-        pgd_t *pgd = pgd_offset_k(addr);
-        pmd_t *pmd;
-        pte_t *ptep, pte;
-
-        if (!pgd_none(*pgd)) {
-                pmd = pmd_offset(pgd, addr);
-                if (!pmd_none(*pmd)) {
-                        preempt_disable();
-                        ptep = pte_offset_map(pmd, addr);
-                        pte = *ptep;
-                        if (pte_present(pte))
-                                page = pte_page(pte);
-                        pte_unmap(ptep);
-                        preempt_enable();
-                }
-        }
-        return page;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DRM_RPR_ARG(vma)
-#else
 #define DRM_RPR_ARG(vma) vma,
-#endif
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
@@ -474,7 +445,8 @@ typedef struct drm_device_dma {
         unsigned long byte_count;
         enum {
                 _DRM_DMA_USE_AGP = 0x01,
-                _DRM_DMA_USE_SG = 0x02
+                _DRM_DMA_USE_SG = 0x02,
+                _DRM_DMA_USE_FB = 0x04
         } flags;
 
 } drm_device_dma_t;
@@ -525,12 +497,19 @@ typedef struct drm_sigdata {
         drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+typedef struct drm_dma_handle {
+        dma_addr_t busaddr;
+        void *vaddr;
+        size_t size;
+} drm_dma_handle_t;
+
 /**
  * Mappings list
  */
typedef struct drm_map_list {
         struct list_head head;  /**< list head */
         drm_map_t *map;         /**< mapping */
+        unsigned int user_token;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
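
The new drm_dma_handle_t bundles the CPU pointer, bus address, and size of one consistent allocation, and is what the reworked drm_pci_alloc()/drm_pci_free() pair (prototyped later in this header) traffics in. A hedged sketch of the pairing, with hypothetical mydrv_* driver code that is not part of this commit:

    /* Sketch: allocate one page of PCI-consistent memory and keep the
     * handle for teardown.  Error handling is minimal. */
    static int mydrv_alloc_ring(drm_device_t *dev)
    {
            drm_dma_handle_t *dmah;

            /* one page, page-aligned, addressable below 4 GiB */
            dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffffUL);
            if (!dmah)
                    return -ENOMEM;

            memset(dmah->vaddr, 0, PAGE_SIZE);  /* CPU view of the buffer */
            /* dmah->busaddr is what the device gets programmed with */
            dev->dev_private = dmah;            /* stash for teardown */
            return 0;
    }

    static void mydrv_free_ring(drm_device_t *dev)
    {
            drm_pci_free(dev, (drm_dma_handle_t *)dev->dev_private);
    }
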
@@ -578,7 +557,22 @@ struct drm_driver {
         int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
         void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
         int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
+
+        /**
+         * Called by \c drm_device_is_agp.  Typically used to determine if a
+         * card is really attached to AGP or not.
+         *
+         * \param dev  DRM device handle
+         *
+         * \returns
+         * One of three values is returned depending on whether or not the
+         * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+         * (return of 1), or may or may not be AGP (return of 2).
+         */
+        int (*device_is_agp) (struct drm_device * dev);
+
         /* these have to be filled in */
         int (*postinit)(struct drm_device *, unsigned long flags);
         irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
         void (*irq_preinstall)(struct drm_device *dev);
@@ -722,11 +716,7 @@ typedef struct drm_device {
         int pci_slot;           /**< PCI slot number */
         int pci_func;           /**< PCI function number */
 #ifdef __alpha__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
-        struct pci_controler *hose;
-#else
         struct pci_controller *hose;
-#endif
 #endif
         drm_sg_mem_t *sg;       /**< Scatter gather memory */
         unsigned long *ctx_bitmap;      /**< context bitmap */
@@ -736,6 +726,7 @@ typedef struct drm_device {
         struct drm_driver *driver;
         drm_local_map_t *agp_buffer_map;
+        unsigned int agp_buffer_token;
         drm_head_t primary;             /**< primary screen head */
 } drm_device_t;
@@ -806,7 +797,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
                                  drm_device_t *dev);
 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
 
-extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM *handle);
@@ -881,11 +872,19 @@ extern int drm_lock_free(drm_device_t *dev,
                          unsigned int context);
 
                                 /* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addmap(drm_device_t *dev, unsigned int offset,
+                      unsigned int size, drm_map_type_t type,
+                      drm_map_flags_t flags, drm_local_map_t **map_ptr);
+extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+                            unsigned int cmd, unsigned long arg);
+extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+                           unsigned int cmd, unsigned long arg);
+
 extern int drm_order( unsigned long size );
-extern int drm_addmap( struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg );
-extern int drm_rmmap( struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg );
 extern int drm_addbufs( struct inode *inode, struct file *filp,
                         unsigned int cmd, unsigned long arg );
 extern int drm_infobufs( struct inode *inode, struct file *filp,
@@ -896,6 +895,10 @@ extern int drm_freebufs( struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg );
 extern int drm_mapbufs( struct inode *inode, struct file *filp,
                         unsigned int cmd, unsigned long arg );
+extern unsigned long drm_get_resource_start(drm_device_t *dev,
+                                            unsigned int resource);
+extern unsigned long drm_get_resource_len(drm_device_t *dev,
+                                          unsigned int resource);
 
                                 /* DMA support (drm_dma.h) */
 extern int drm_dma_setup(drm_device_t *dev);
@@ -919,14 +922,17 @@ extern void drm_vbl_send_signals( drm_device_t *dev );
 
                                 /* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
-extern int drm_agp_acquire(struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg);
-extern void drm_agp_do_release(drm_device_t *dev);
-extern int drm_agp_release(struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(struct inode *inode, struct file *filp,
-                          unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire(drm_device_t * dev);
+extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+                                 unsigned int cmd, unsigned long arg);
+extern int drm_agp_release(drm_device_t *dev);
+extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+                                 unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
+extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+                                unsigned int cmd, unsigned long arg);
+extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
+extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+                              unsigned int cmd, unsigned long arg);
 extern int drm_agp_alloc(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg);
@@ -976,12 +982,10 @@ extern int drm_ati_pcigart_cleanup(drm_device_t *dev,
                                         unsigned long addr,
                                         dma_addr_t bus_addr);
 
-extern void *drm_pci_alloc(drm_device_t * dev, size_t size,
-                           size_t align, dma_addr_t maxaddr,
-                           dma_addr_t * busaddr);
-extern void drm_pci_free(drm_device_t * dev, size_t size,
-                         void *vaddr, dma_addr_t busaddr);
+extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
+                                       size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
 
                                 /* sysfs support (drm_sysfs.c) */
 struct drm_sysfs_class;
@@ -1012,17 +1016,26 @@ static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_devi
         drm_ioremapfree( map->handle, map->size, dev );
 }
 
-static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
 {
-        struct list_head *_list;
-        list_for_each( _list, &dev->maplist->head ) {
-                drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
-                if ( _entry->map &&
-                     _entry->map->offset == offset ) {
-                        return _entry->map;
-                }
-        }
-        return NULL;
+        drm_map_list_t *_entry;
+        list_for_each_entry(_entry, &dev->maplist->head, head)
+                if (_entry->user_token == token)
+                        return _entry->map;
+        return NULL;
+}
+
+static __inline__ int drm_device_is_agp(drm_device_t *dev)
+{
+        if ( dev->driver->device_is_agp != NULL ) {
+                int err = (*dev->driver->device_is_agp)( dev );
+                if (err != 2) {
+                        return err;
+                }
+        }
+
+        return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
 
 static __inline__ void drm_core_dropmap(struct drm_map *map)
 
...
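
A driver opts into the new device_is_agp hook by filling in the drm_driver member shown above; the return values follow the tri-state contract in its comment (0 = definitely not AGP, 1 = definitely AGP, 2 = unknown, letting drm_device_is_agp() fall back to the PCI capability probe). A minimal sketch, with invented PCI IDs purely for illustration:

    /* Hypothetical driver hook, not from this commit. */
    static int mydrv_device_is_agp(struct drm_device *dev)
    {
            if (dev->pdev->device == 0x1234)    /* made-up PCI-only variant */
                    return 0;
            if (dev->pdev->device == 0x5678)    /* made-up AGP-only variant */
                    return 1;
            return 2;   /* unsure: core falls back to pci_find_capability() */
    }
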
@@ -37,7 +37,7 @@
 #if __OS_HAS_AGP
 
 /**
- * AGP information ioctl.
+ * Get AGP information.
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -48,51 +48,56 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(struct inode *inode, struct file *filp,
-                 unsigned int cmd, unsigned long arg)
+int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
         DRM_AGP_KERN *kern;
-        drm_agp_info_t info;
 
         if (!dev->agp || !dev->agp->acquired)
                 return -EINVAL;
 
         kern = &dev->agp->agp_info;
-        info.agp_version_major = kern->version.major;
-        info.agp_version_minor = kern->version.minor;
-        info.mode = kern->mode;
-        info.aperture_base = kern->aper_base;
-        info.aperture_size = kern->aper_size * 1024 * 1024;
-        info.memory_allowed = kern->max_memory << PAGE_SHIFT;
-        info.memory_used = kern->current_memory << PAGE_SHIFT;
-        info.id_vendor = kern->device->vendor;
-        info.id_device = kern->device->device;
+        info->agp_version_major = kern->version.major;
+        info->agp_version_minor = kern->version.minor;
+        info->mode = kern->mode;
+        info->aperture_base = kern->aper_base;
+        info->aperture_size = kern->aper_size * 1024 * 1024;
+        info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+        info->memory_used = kern->current_memory << PAGE_SHIFT;
+        info->id_vendor = kern->device->vendor;
+        info->id_device = kern->device->device;
+
+        return 0;
+}
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+                       unsigned int cmd, unsigned long arg)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+        drm_agp_info_t info;
+        int err;
+
+        err = drm_agp_info(dev, &info);
+        if (err)
+                return err;
 
-        if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+        if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
                 return -EFAULT;
+
         return 0;
 }
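
The pattern here repeats through the rest of this file: each AGP ioctl is split into an exported in-kernel function plus a thin *_ioctl wrapper that only does the user copies. With drm_agp_info() exported, a driver can now query the aperture without the user-copy round trip; a hedged sketch (mydrv_* is hypothetical, not part of this commit):

    static int mydrv_log_aperture(drm_device_t *dev)
    {
            drm_agp_info_t info;
            int err;

            err = drm_agp_info(dev, &info);
            if (err)
                    return err;

            DRM_DEBUG("AGP v%d.%d, aperture %lu MB\n",
                      info.agp_version_major, info.agp_version_minor,
                      (unsigned long)info.aperture_size >> 20);
            return 0;
    }
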
 
 /**
- * Acquire the AGP device (ioctl).
+ * Acquire the AGP device.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
+ * \param dev DRM device that is to acquire AGP
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
- * agp_acquire().
+ * \c agp_backend_acquire.
  */
-int drm_agp_acquire(struct inode *inode, struct file *filp,
-                    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire(drm_device_t *dev)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
-
         if (!dev->agp)
                 return -ENODEV;
         if (dev->agp->acquired)
@@ -102,9 +107,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
         dev->agp->acquired = 1;
         return 0;
 }
+EXPORT_SYMBOL(drm_agp_acquire);
 
 /**
- * Release the AGP device (ioctl).
+ * Acquire the AGP device (ioctl).
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -112,63 +118,80 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
  * \param arg user argument.
  * \return zero on success or a negative number on failure.
  *
- * Verifies the AGP device has been acquired and calls agp_backend_release().
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
  */
-int drm_agp_release(struct inode *inode, struct file *filp,
-                    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+                          unsigned int cmd, unsigned long arg)
 {
         drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
 
-        if (!dev->agp || !dev->agp->acquired)
-                return -EINVAL;
-        agp_backend_release(dev->agp->bridge);
-        dev->agp->acquired = 0;
-        return 0;
+        return drm_agp_acquire( (drm_device_t *) priv->head->dev );
 }
 
 /**
  * Release the AGP device.
  *
- * Calls agp_backend_release().
+ * \param dev DRM device that is to release AGP
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
  */
-void drm_agp_do_release(drm_device_t *dev)
+int drm_agp_release(drm_device_t *dev)
 {
+        if (!dev->agp || !dev->agp->acquired)
+                return -EINVAL;
         agp_backend_release(dev->agp->bridge);
+        dev->agp->acquired = 0;
+        return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+                          unsigned int cmd, unsigned long arg)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+
+        return drm_agp_release(dev);
 }
 
 /**
  * Enable the AGP bus.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_mode structure.
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired but not enabled, and calls
- * agp_enable().
+ * \c agp_enable.
  */
-int drm_agp_enable(struct inode *inode, struct file *filp,
-                   unsigned int cmd, unsigned long arg)
+int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
-        drm_agp_mode_t mode;
-
         if (!dev->agp || !dev->agp->acquired)
                 return -EINVAL;
 
-        if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
-                return -EFAULT;
-
         dev->agp->mode = mode.mode;
         agp_enable(dev->agp->bridge, mode.mode);
         dev->agp->base = dev->agp->agp_info.aper_base;
         dev->agp->enabled = 1;
         return 0;
 }
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+                         unsigned int cmd, unsigned long arg)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+        drm_agp_mode_t mode;
+
+        if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+                return -EFAULT;
+
+        return drm_agp_enable(dev, mode);
+}
 
 /**
  * Allocate AGP memory.
  *
@@ -206,7 +229,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
         pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
         type = (u32) request.type;
 
-        if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) {
+        if (!(memory = drm_alloc_agp(dev, pages, type))) {
                 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                 return -ENOMEM;
         }
@@ -403,13 +426,8 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
                 return NULL;
         }
         head->memory = NULL;
-#if LINUX_VERSION_CODE <= 0x020408
-        head->cant_use_aperture = 0;
-        head->page_mask = ~(0xfff);
-#else
         head->cant_use_aperture = head->agp_info.cant_use_aperture;
         head->page_mask = head->agp_info.page_mask;
-#endif
 
         return head;
 }
@@ -436,6 +454,7 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
                 return -EINVAL;
         return agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_agp_bind_memory);
 
 /** Calls agp_unbind_memory() */
 int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
 
...
@@ -36,37 +36,69 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order( unsigned long size )
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
-        int order;
-        unsigned long tmp;
+        return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
 
-        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
-                ;
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+{
+        return pci_resource_len(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_len);
 
-        if (size & (size - 1))
-                ++order;
+static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
+                                              drm_local_map_t *map)
+{
+        struct list_head *list;
 
-        return order;
+        list_for_each(list, &dev->maplist->head) {
+                drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+                if (entry->map && map->type == entry->map->type &&
+                    entry->map->offset == map->offset) {
+                        return entry->map;
+                }
+        }
+
+        return NULL;
 }
-EXPORT_SYMBOL(drm_order);
 
-#ifdef CONFIG_COMPAT
 /*
- * Used to allocate 32-bit handles for _DRM_SHM regions
+ * Used to allocate 32-bit handles for mappings.
+ * The 0x10000000 value is chosen to be out of the way of
+ * FB/register and GART physical addresses.
  */
-static unsigned int map32_handle = 0x10000000;
+#define START_RANGE 0x10000000
+#define END_RANGE 0x40000000
+
+#ifdef _LP64
+static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
+{
+        static unsigned int map32_handle = START_RANGE;
+        unsigned int hash;
+
+        if (lhandle & 0xffffffff00000000) {
+                hash = map32_handle;
+                map32_handle += PAGE_SIZE;
+                if (map32_handle > END_RANGE)
+                        map32_handle = START_RANGE;
+        } else
+                hash = lhandle;
+
+        while (1) {
+                drm_map_list_t *_entry;
+                list_for_each_entry(_entry, &dev->maplist->head, head) {
+                        if (_entry->user_token == hash)
+                                break;
+                }
+                if (&_entry->head == &dev->maplist->head)
+                        return hash;
+
+                hash += PAGE_SIZE;
+                map32_handle += PAGE_SIZE;
+        }
+}
+#else
+# define HandleID(x,dev) (unsigned int)(x)
 #endif
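
On 64-bit kernels (_LP64) a kernel address no longer fits in the 32-bit handle userspace gets back, so HandleID() hands out page-spaced tokens from [START_RANGE, END_RANGE) and linearly probes past collisions. The logic above can be traced by hand; the addresses below are invented for illustration:

    /* Assuming START_RANGE 0x10000000 and one map already holding
     * token 0x10000000:
     *
     *   lhandle = 0xffff810012340000  (kernel pointer, high bits set)
     *     hash starts at map32_handle      -> 0x10000000
     *     token 0x10000000 already in use  -> probe 0x10001000
     *     token 0x10001000 is free         -> return 0x10001000
     *
     *   lhandle = 0x00000000e0000000  (fits in 32 bits)
     *     hash = lhandle, returned as-is if no existing map holds it
     */
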
 
 /**
@@ -82,25 +114,23 @@ static unsigned int map32_handle = 0x10000000;
  * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
  */
-int drm_addmap( struct inode *inode, struct file *filp,
-                unsigned int cmd, unsigned long arg )
+int drm_addmap(drm_device_t * dev, unsigned int offset,
+               unsigned int size, drm_map_type_t type,
+               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
         drm_map_t *map;
-        drm_map_t __user *argp = (void __user *)arg;
         drm_map_list_t *list;
-
-        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
+        drm_dma_handle_t *dmah;
+        drm_local_map_t *found_map;
 
         map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
         if ( !map )
                 return -ENOMEM;
 
-        if ( copy_from_user( map, argp, sizeof(*map) ) ) {
-                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
-                return -EFAULT;
-        }
+        map->offset = offset;
+        map->size = size;
+        map->flags = flags;
+        map->type = type;
 
         /* Only allow shared memory to be removable since we only keep enough
          * book keeping information about shared memory to allow for removal
@@ -122,7 +152,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
         switch ( map->type ) {
         case _DRM_REGISTERS:
         case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                 if ( map->offset + map->size < map->offset ||
                      map->offset < virt_to_phys(high_memory) ) {
                         drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@@ -132,6 +162,24 @@ int drm_addmap( struct inode *inode, struct file *filp,
 #ifdef __alpha__
                 map->offset += dev->hose->mem_space->start;
 #endif
+                /* Some drivers preinitialize some maps, without the X Server
+                 * needing to be aware of it.  Therefore, we just return success
+                 * when the server tries to create a duplicate map.
+                 */
+                found_map = drm_find_matching_map(dev, map);
+                if (found_map != NULL) {
+                        if (found_map->size != map->size) {
+                                DRM_DEBUG("Matching maps of type %d with "
+                                          "mismatched sizes, (%ld vs %ld)\n",
+                                          map->type, map->size, found_map->size);
+                                found_map->size = map->size;
+                        }
+
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        *map_ptr = found_map;
+                        return 0;
+                }
+
                 if (drm_core_has_MTRR(dev)) {
                         if ( map->type == _DRM_FRAME_BUFFER ||
                              (map->flags & _DRM_WRITE_COMBINING) ) {
@@ -178,9 +226,22 @@ int drm_addmap( struct inode *inode, struct file *filp,
                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                         return -EINVAL;
                 }
-                map->offset += dev->sg->handle;
+                map->offset += (unsigned long)dev->sg->virtual;
+                break;
+        case _DRM_CONSISTENT:
+                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+                 * As we're limiting the address to 2^32-1 (or less),
+                 * casting it down to 32 bits is no problem, but we
+                 * need to point to a 64bit variable first. */
+                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+                if (!dmah) {
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        return -ENOMEM;
+                }
+                map->handle = dmah->vaddr;
+                map->offset = (unsigned long)dmah->busaddr;
+                kfree(dmah);
                 break;
         default:
                 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                 return -EINVAL;
@@ -196,17 +257,56 @@ int drm_addmap( struct inode *inode, struct file *filp,
 
         down(&dev->struct_sem);
         list_add(&list->head, &dev->maplist->head);
-#ifdef CONFIG_COMPAT
-        /* Assign a 32-bit handle for _DRM_SHM mappings */
+        /* Assign a 32-bit handle */
         /* We do it here so that dev->struct_sem protects the increment */
-        if (map->type == _DRM_SHM)
-                map->offset = map32_handle += PAGE_SIZE;
-#endif
+        list->user_token = HandleID(map->type==_DRM_SHM
+                                    ? (unsigned long)map->handle
+                                    : map->offset, dev);
         up(&dev->struct_sem);
 
-        if ( copy_to_user( argp, map, sizeof(*map) ) )
-                return -EFAULT;
-        if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
-                return -EFAULT;
+        *map_ptr = map;
+
         return 0;
 }
+EXPORT_SYMBOL(drm_addmap);
+
+int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+                     unsigned int cmd, unsigned long arg)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+        drm_map_t map;
+        drm_map_t *map_ptr;
+        drm_map_t __user *argp = (void __user *)arg;
+        int err;
+        unsigned long handle = 0;
+
+        if (!(filp->f_mode & 3))
+                return -EACCES; /* Require read/write */
+
+        if (copy_from_user(& map, argp, sizeof(map))) {
+                return -EFAULT;
+        }
+
+        err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
+                         &map_ptr);
+
+        if (err) {
+                return err;
+        }
+
+        {
+                drm_map_list_t *_entry;
+                list_for_each_entry(_entry, &dev->maplist->head, head) {
+                        if (_entry->map == map_ptr)
+                                handle = _entry->user_token;
+                }
+                if (!handle)
+                        return -EFAULT;
+        }
+
+        if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
+                return -EFAULT;
+        if (put_user(handle, &argp->handle))
+                return -EFAULT;
+        return 0;
 }
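
After this change, the handle copied back to userspace is the 32-bit user token, not the raw kernel map offset, and that token is what gets passed as the mmap() offset. An illustrative userspace sketch using the era's typedef'd UAPI exactly as it appears in this diff (header paths and error handling simplified, not verified libdrm code):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm.h>    /* the drm.h shown earlier in this commit */

    void *map_registers(int drm_fd, unsigned long bar_base,
                        unsigned long size)
    {
            drm_map_t map = {0};

            map.offset = bar_base;          /* bus address of a register BAR */
            map.size   = size;
            map.type   = _DRM_REGISTERS;

            if (ioctl(drm_fd, DRM_IOCTL_ADD_MAP, &map) < 0)
                    return MAP_FAILED;

            /* map.handle now carries the 32-bit token */
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        drm_fd, (unsigned long)map.handle);
    }
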
@@ -226,64 +326,43 @@ int drm_addmap( struct inode *inode, struct file *filp,
  * its being used, and free any associate resource (such as MTRR's) if it's not
  * being on use.
  *
- * \sa addmap().
+ * \sa drm_addmap
  */
-int drm_rmmap(struct inode *inode, struct file *filp,
-              unsigned int cmd, unsigned long arg)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
         struct list_head *list;
         drm_map_list_t *r_list = NULL;
-        drm_vma_entry_t *pt, *prev;
-        drm_map_t *map;
-        drm_map_t request;
-        int found_maps = 0;
+        drm_dma_handle_t dmah;
 
-        if (copy_from_user(&request, (drm_map_t __user *)arg,
-                           sizeof(request))) {
-                return -EFAULT;
-        }
-
-        down(&dev->struct_sem);
-        list = &dev->maplist->head;
+        /* Find the list entry for the map and remove it */
         list_for_each(list, &dev->maplist->head) {
                 r_list = list_entry(list, drm_map_list_t, head);
-                if(r_list->map &&
-                   r_list->map->offset == (unsigned long) request.handle &&
-                   r_list->map->flags & _DRM_REMOVABLE) break;
+                if (r_list->map == map) {
+                        list_del(list);
+                        drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+                        break;
+                }
         }
 
-        /* List has wrapped around to the head pointer, or its empty we didn't
-         * find anything.
+        /* List has wrapped around to the head pointer, or it's empty and we
+         * didn't find anything.
          */
-        if(list == (&dev->maplist->head)) {
-                up(&dev->struct_sem);
+        if (list == (&dev->maplist->head)) {
                 return -EINVAL;
         }
-        map = r_list->map;
-        list_del(list);
-        drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 
-        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-                if (pt->vma->vm_private_data == map) found_maps++;
-        }
-
-        if(!found_maps) {
         switch (map->type) {
         case _DRM_REGISTERS:
+                drm_ioremapfree(map->handle, map->size, dev);
+                /* FALLTHROUGH */
         case _DRM_FRAME_BUFFER:
-                if (drm_core_has_MTRR(dev)) {
-                        if (map->mtrr >= 0) {
-                                int retcode;
-                                retcode = mtrr_del(map->mtrr,
-                                                   map->offset,
-                                                   map->size);
-                                DRM_DEBUG("mtrr_del = %d\n", retcode);
-                        }
+                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+                        int retcode;
+                        retcode = mtrr_del(map->mtrr, map->offset,
+                                           map->size);
+                        DRM_DEBUG ("mtrr_del=%d\n", retcode);
                 }
-                drm_ioremapfree(map->handle, map->size, dev);
                 break;
         case _DRM_SHM:
                 vfree(map->handle);
@@ -291,16 +370,94 @@ int drm_rmmap(struct inode *inode, struct file *filp,
                 break;
         case _DRM_AGP:
         case _DRM_SCATTER_GATHER:
                 break;
+        case _DRM_CONSISTENT:
+                dmah.vaddr = map->handle;
+                dmah.busaddr = map->offset;
+                dmah.size = map->size;
+                __drm_pci_free(dev, &dmah);
+                break;
         }
         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-        }
 
-        up(&dev->struct_sem);
         return 0;
 }
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+{
+        int ret;
+
+        down(&dev->struct_sem);
+        ret = drm_rmmap_locked(dev, map);
+        up(&dev->struct_sem);
+
+        return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+                    unsigned int cmd, unsigned long arg)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+        drm_map_t request;
+        drm_local_map_t *map = NULL;
+        struct list_head *list;
+        int ret;
+
+        if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
+                return -EFAULT;
+        }
+
+        down(&dev->struct_sem);
+        list_for_each(list, &dev->maplist->head) {
+                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+
+                if (r_list->map &&
+                    r_list->user_token == (unsigned long) request.handle &&
+                    r_list->map->flags & _DRM_REMOVABLE) {
+                        map = r_list->map;
+                        break;
+                }
+        }
+
+        /* List has wrapped around to the head pointer, or its empty we didn't
+         * find anything.
+         */
+        if (list == (&dev->maplist->head)) {
+                up(&dev->struct_sem);
+                return -EINVAL;
+        }
+
+        if (!map)
+                return -EINVAL;
+
+        /* Register and framebuffer maps are permanent */
+        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+                up(&dev->struct_sem);
+                return 0;
+        }
+
+        ret = drm_rmmap_locked(dev, map);
+
+        up(&dev->struct_sem);
+
+        return ret;
+}
 
 /**
  * Cleanup after an error on one of the addbufs() functions.
  *
+ * \param dev DRM device.
  * \param entry buffer entry where the error occurred.
  *
  * Frees any pages and buffers associated with the given entry.
@@ -344,25 +501,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
 
 #if __OS_HAS_AGP
 /**
- * Add AGP buffers for DMA transfers (ioctl).
+ * Add AGP buffers for DMA transfers.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param dev drm_device_t to which the buffers are to be added.
+ * \param request pointer to a drm_buf_desc_t describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-static int drm_addbufs_agp( struct inode *inode, struct file *filp,
-                            unsigned int cmd, unsigned long arg )
+int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
         drm_device_dma_t *dma = dev->dma;
-        drm_buf_desc_t request;
         drm_buf_entry_t *entry;
         drm_buf_t *buf;
         unsigned long offset;
@@ -376,25 +527,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
         int byte_count;
         int i;
         drm_buf_t **temp_buflist;
-        drm_buf_desc_t __user *argp = (void __user *)arg;
 
         if ( !dma ) return -EINVAL;
 
-        if ( copy_from_user( &request, argp,
-                             sizeof(request) ) )
-                return -EFAULT;
-
-        count = request.count;
-        order = drm_order( request.size );
+        count = request->count;
+        order = drm_order(request->size);
         size = 1 << order;
 
-        alignment = (request.flags & _DRM_PAGE_ALIGN)
+        alignment = (request->flags & _DRM_PAGE_ALIGN)
                 ? PAGE_ALIGN(size) : size;
         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
         total = PAGE_SIZE << page_order;
 
         byte_count = 0;
-        agp_offset = dev->agp->base + request.agp_start;
+        agp_offset = dev->agp->base + request->agp_start;
 
         DRM_DEBUG( "count:      %d\n", count );
         DRM_DEBUG( "order:      %d\n", order );
@@ -508,26 +654,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
 
         up( &dev->struct_sem );
 
-        request.count = entry->buf_count;
-        request.size = size;
-
-        if ( copy_to_user( argp, &request, sizeof(request) ) )
-                return -EFAULT;
+        request->count = entry->buf_count;
+        request->size = size;
 
         dma->flags = _DRM_DMA_USE_AGP;
 
         atomic_dec( &dev->buf_alloc );
         return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-static int drm_addbufs_pci( struct inode *inode, struct file *filp,
-                            unsigned int cmd, unsigned long arg )
+int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
         drm_device_dma_t *dma = dev->dma;
-        drm_buf_desc_t request;
         int count;
         int order;
         int size;
@@ -543,26 +683,22 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
         int page_count;
         unsigned long *temp_pagelist;
         drm_buf_t **temp_buflist;
-        drm_buf_desc_t __user *argp = (void __user *)arg;
 
         if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
         if ( !dma ) return -EINVAL;
 
-        if ( copy_from_user( &request, argp, sizeof(request) ) )
-                return -EFAULT;
-
-        count = request.count;
-        order = drm_order( request.size );
+        count = request->count;
+        order = drm_order(request->size);
         size = 1 << order;
 
         DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-                   request.count, request.size, size,
+                   request->count, request->size, size,
                    order, dev->queue_count );
 
         if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
         if ( dev->queue_count ) return -EBUSY; /* Not while in use */
 
-        alignment = (request.flags & _DRM_PAGE_ALIGN)
+        alignment = (request->flags & _DRM_PAGE_ALIGN)
                 ? PAGE_ALIGN(size) : size;
         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
         total = PAGE_SIZE << page_order;
@@ -740,25 +876,18 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
 
         up( &dev->struct_sem );
 
-        request.count = entry->buf_count;
-        request.size = size;
-
-        if ( copy_to_user( argp, &request, sizeof(request) ) )
-                return -EFAULT;
+        request->count = entry->buf_count;
+        request->size = size;
 
         atomic_dec( &dev->buf_alloc );
         return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_addbufs_sg( struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg )
+static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
 {
-        drm_file_t *priv = filp->private_data;
-        drm_device_t *dev = priv->head->dev;
         drm_device_dma_t *dma = dev->dma;
-        drm_buf_desc_t __user *argp = (void __user *)arg;
-        drm_buf_desc_t request;
         drm_buf_entry_t *entry;
         drm_buf_t *buf;
         unsigned long offset;
@@ -777,20 +906,17 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
         if ( !dma ) return -EINVAL;
 
-        if ( copy_from_user( &request, argp, sizeof(request) ) )
-                return -EFAULT;
-
-        count = request.count;
-        order = drm_order( request.size );
+        count = request->count;
+        order = drm_order(request->size);
         size = 1 << order;
 
-        alignment = (request.flags & _DRM_PAGE_ALIGN)
+        alignment = (request->flags & _DRM_PAGE_ALIGN)
                 ? PAGE_ALIGN(size) : size;
         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
         total = PAGE_SIZE << page_order;
 
         byte_count = 0;
-        agp_offset = request.agp_start;
+        agp_offset = request->agp_start;
 
         DRM_DEBUG( "count:      %d\n", count );
         DRM_DEBUG( "order:      %d\n", order );
@@ -848,7 +974,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
                 buf->offset  = (dma->byte_count + offset);
                 buf->bus_address = agp_offset + offset;
-                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+                buf->address = (void *)(agp_offset + offset
+                                        + (unsigned long)dev->sg->virtual);
                 buf->next    = NULL;
                 buf->waiting = 0;
                 buf->pending = 0;
@@ -905,11 +1032,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
         up( &dev->struct_sem );
 
-        request.count = entry->buf_count;
-        request.size = size;
-
-        if ( copy_to_user( argp, &request, sizeof(request) ) )
-                return -EFAULT;
+        request->count = entry->buf_count;
+        request->size = size;
 
         dma->flags = _DRM_DMA_USE_SG;
 
@@ -917,6 +1041,161 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
         return 0;
 }
+
+int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
+{
+        drm_device_dma_t *dma = dev->dma;
+        drm_buf_entry_t *entry;
+        drm_buf_t *buf;
+        unsigned long offset;
+        unsigned long agp_offset;
+        int count;
+        int order;
+        int size;
+        int alignment;
+        int page_order;
+        int total;
+        int byte_count;
+        int i;
+        drm_buf_t **temp_buflist;
+
+        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+                return -EINVAL;
+
+        if (!dma)
+                return -EINVAL;
+
+        count = request->count;
+        order = drm_order(request->size);
+        size = 1 << order;
+
+        alignment = (request->flags & _DRM_PAGE_ALIGN)
+            ? PAGE_ALIGN(size) : size;
+        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+        total = PAGE_SIZE << page_order;
+
+        byte_count = 0;
+        agp_offset = request->agp_start;
+
+        DRM_DEBUG("count:      %d\n", count);
+        DRM_DEBUG("order:      %d\n", order);
+        DRM_DEBUG("size:       %d\n", size);
+        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+        DRM_DEBUG("alignment:  %d\n", alignment);
+        DRM_DEBUG("page_order: %d\n", page_order);
+        DRM_DEBUG("total:      %d\n", total);
+
+        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+                return -EINVAL;
+        if (dev->queue_count)
+                return -EBUSY;  /* Not while in use */
+
+        spin_lock(&dev->count_lock);
+        if (dev->buf_use) {
+                spin_unlock(&dev->count_lock);
+                return -EBUSY;
+        }
+        atomic_inc(&dev->buf_alloc);
+        spin_unlock(&dev->count_lock);
+
+        down(&dev->struct_sem);
+        entry = &dma->bufs[order];
+        if (entry->buf_count) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
+                return -ENOMEM; /* May only call once for each order */
+        }
+
+        if (count < 0 || count > 4096) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
+                return -EINVAL;
+        }
+
+        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                   DRM_MEM_BUFS);
+        if (!entry->buflist) {
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
+                return -ENOMEM;
+        }
+        memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+        entry->buf_size = size;
+        entry->page_order = page_order;
+
+        offset = 0;
+
+        while (entry->buf_count < count) {
+                buf = &entry->buflist[entry->buf_count];
+                buf->idx = dma->buf_count + entry->buf_count;
+                buf->total = alignment;
+                buf->order = order;
+                buf->used = 0;
+
+                buf->offset = (dma->byte_count + offset);
+                buf->bus_address = agp_offset + offset;
+                buf->address = (void *)(agp_offset + offset);
+                buf->next = NULL;
+                buf->waiting = 0;
+                buf->pending = 0;
+                init_waitqueue_head(&buf->dma_wait);
+                buf->filp = NULL;
+
+                buf->dev_priv_size = dev->driver->dev_priv_size;
+                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+                if (!buf->dev_private) {
+                        /* Set count correctly so we free the proper amount. */
+                        entry->buf_count = count;
+                        drm_cleanup_buf_error(dev, entry);
+                        up(&dev->struct_sem);
+                        atomic_dec(&dev->buf_alloc);
+                        return -ENOMEM;
+                }
+                memset(buf->dev_private, 0, buf->dev_priv_size);
+
+                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+                offset += alignment;
+                entry->buf_count++;
+                byte_count += PAGE_SIZE << page_order;
+        }
+
+        DRM_DEBUG("byte_count: %d\n", byte_count);
+
+        temp_buflist = drm_realloc(dma->buflist,
+                                   dma->buf_count * sizeof(*dma->buflist),
+                                   (dma->buf_count + entry->buf_count)
+                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+        if (!temp_buflist) {
+                /* Free the entry because it isn't valid */
+                drm_cleanup_buf_error(dev, entry);
+                up(&dev->struct_sem);
+                atomic_dec(&dev->buf_alloc);
+                return -ENOMEM;
+        }
+        dma->buflist = temp_buflist;
+
+        for (i = 0; i < entry->buf_count; i++) {
+                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+        }
+
+        dma->buf_count += entry->buf_count;
+        dma->byte_count += byte_count;
+
+        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+        up(&dev->struct_sem);
+
+        request->count = entry->buf_count;
+        request->size = size;
+
+        dma->flags = _DRM_DMA_USE_FB;
+
+        atomic_dec(&dev->buf_alloc);
+        return 0;
+}
 
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -937,6 +1216,7 @@ int drm_addbufs( struct inode *inode, struct file *filp,
         drm_buf_desc_t request;
         drm_file_t *priv = filp->private_data;
         drm_device_t *dev = priv->head->dev;
+        int ret;
 
         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                 return -EINVAL;
@@ -947,13 +1227,23 @@ int drm_addbufs( struct inode *inode, struct file *filp,
 
 #if __OS_HAS_AGP
         if ( request.flags & _DRM_AGP_BUFFER )
-                return drm_addbufs_agp( inode, filp, cmd, arg );
+                ret=drm_addbufs_agp(dev, &request);
         else
 #endif
         if ( request.flags & _DRM_SG_BUFFER )
-                return drm_addbufs_sg( inode, filp, cmd, arg );
+                ret=drm_addbufs_sg(dev, &request);
+        else if ( request.flags & _DRM_FB_BUFFER)
+                ret=drm_addbufs_fb(dev, &request);
         else
-                return drm_addbufs_pci( inode, filp, cmd, arg );
+                ret=drm_addbufs_pci(dev, &request);
+
+        if (ret==0) {
+                if (copy_to_user((void __user *)arg, &request,
+                                 sizeof(request))) {
+                        ret = -EFAULT;
+                }
+        }
+        return ret;
 }
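
drm_addbufs() now dispatches on the request flags, with the new _DRM_FB_BUFFER flag routed to drm_addbufs_fb() above; agp_start doubles as the offset of the buffer pool within the framebuffer in that path. A hedged userspace sketch of requesting framebuffer-resident DMA buffers, with arbitrary values and no claim that any shipping driver used exactly this:

    #include <sys/ioctl.h>
    #include <drm.h>    /* the drm.h shown earlier in this commit */

    static int request_fb_buffers(int drm_fd)
    {
            drm_buf_desc_t req = {0};

            req.count     = 32;                 /* arbitrary */
            req.size      = 65536;              /* bytes per buffer */
            req.flags     = _DRM_FB_BUFFER | _DRM_PAGE_ALIGN;
            req.agp_start = 0;  /* offset of the pool within the FB */

            if (ioctl(drm_fd, DRM_IOCTL_ADD_BUFS, &req) < 0)
                    return -1;
            /* on success the kernel wrote back the granted count/size */
            return req.count;
    }
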
@@ -1196,43 +1486,31 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
                 return -EFAULT;
 
         if ( request.count >= dma->buf_count ) {
-                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
-                    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
+                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+                    || (drm_core_check_feature(dev, DRIVER_SG)
+                        && (dma->flags & _DRM_DMA_USE_SG))
+                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+                        && (dma->flags & _DRM_DMA_USE_FB))) {
                         drm_map_t *map = dev->agp_buffer_map;
+                        unsigned long token = dev->agp_buffer_token;
 
                         if ( !map ) {
                                 retcode = -EINVAL;
                                 goto done;
                         }
 
-#if LINUX_VERSION_CODE <= 0x020402
-                        down( &current->mm->mmap_sem );
-#else
                         down_write( &current->mm->mmap_sem );
-#endif
                         virtual = do_mmap( filp, 0, map->size,
                                            PROT_READ | PROT_WRITE,
                                            MAP_SHARED,
-                                           (unsigned long)map->offset );
-#if LINUX_VERSION_CODE <= 0x020402
-                        up( &current->mm->mmap_sem );
-#else
+                                           token );
                         up_write( &current->mm->mmap_sem );
-#endif
                 } else {
-#if LINUX_VERSION_CODE <= 0x020402
-                        down( &current->mm->mmap_sem );
-#else
                         down_write( &current->mm->mmap_sem );
-#endif
                         virtual = do_mmap( filp, 0, dma->byte_count,
                                            PROT_READ | PROT_WRITE,
                                            MAP_SHARED, 0 );
-#if LINUX_VERSION_CODE <= 0x020402
-                        up( &current->mm->mmap_sem );
-#else
                         up_write( &current->mm->mmap_sem );
-#endif
                 }
                 if ( virtual > -1024UL ) {
                         /* Real error */
@@ -1279,3 +1557,26 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
         return retcode;
 }
+
+/**
+ * Compute size order.  Returns the exponent of the smaller power of two which
+ * is greater or equal to given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order( unsigned long size )
+{
+        int order;
+        unsigned long tmp;
+
+        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
+                ;
+
+        if (size & (size - 1))
+                ++order;
+
+        return order;
+}
+EXPORT_SYMBOL(drm_order);
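
drm_order() (moved to the end of the file and exported here) computes the exponent of the smallest power of two greater than or equal to size, i.e. ceil(log2(size)). Worked examples against the code above:

    /*   drm_order(1)    == 0   (2^0  = 1, exact)
     *   drm_order(4096) == 12  (2^12 = 4096, exact)
     *   drm_order(4097) == 13  (rounded up to 2^13 = 8192)
     * The drm_addbufs_*() functions then use size = 1 << order. */
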
@@ -212,6 +212,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
 	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
 	drm_ctx_priv_map_t request;
 	drm_map_t *map;
+	drm_map_list_t *_entry;
 	if (copy_from_user(&request, argp, sizeof(request)))
 		return -EFAULT;
@@ -225,7 +226,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
 	map = dev->context_sareas[request.ctx_id];
 	up(&dev->struct_sem);
-	request.handle = (void *) map->offset;
+	request.handle = 0;
+	list_for_each_entry(_entry, &dev->maplist->head, head) {
+		if (_entry->map == map) {
+			request.handle = (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+	if (request.handle == 0)
+		return -EINVAL;
+
 	if (copy_to_user(argp, &request, sizeof(request)))
 		return -EFAULT;
 	return 0;
@@ -262,7 +273,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
 	list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		if (r_list->map
-		    && r_list->map->offset == (unsigned long) request.handle)
+		    && r_list->user_token == (unsigned long) request.handle)
 			goto found;
 	}
 bad:
@@ -369,7 +380,7 @@ int drm_resctx( struct inode *inode, struct file *filp,
 		for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
 			ctx.handle = i;
 			if ( copy_to_user( &res.contexts[i],
-					   &i, sizeof(i) ) )
+					   &ctx, sizeof(ctx) ) )
 				return -EFAULT;
 		}
 	}
@@ -70,8 +70,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { drm_noop,      1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { drm_authmagic, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap,    1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { drm_rmmap,     1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { drm_rmmap_ioctl,  1, 0 },
 	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },
@@ -102,10 +102,10 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { drm_control,   1, 1 },
 #if __OS_HAS_AGP
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable,  1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info,    1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable_ioctl,  1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info_ioctl,    1, 0 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { drm_agp_alloc, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { drm_agp_free,  1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { drm_agp_bind,  1, 1 },
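The renames in this table reflect a split that runs through the whole series: the core routine keeps kernel-friendly arguments so other kernel code can call it, while a thin *_ioctl wrapper does the userspace copying. A sketch of the resulting pairing (prototypes as implied by the table; bodies omitted, and the exact argument lists are an assumption):

	int drm_agp_acquire(drm_device_t *dev);            /* callable in-kernel */
	int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
				  unsigned int cmd, unsigned long arg);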
@@ -127,14 +127,12 @@ static drm_ioctl_desc_t drm_ioctls[] = {
  *
  * Frees every resource in \p dev.
  *
- * \sa drm_device and setup().
+ * \sa drm_device
  */
 int drm_takedown( drm_device_t *dev )
 {
 	drm_magic_entry_t *pt, *next;
-	drm_map_t *map;
 	drm_map_list_t *r_list;
-	struct list_head *list, *list_next;
 	drm_vma_entry_t *vma, *vma_next;
 	int i;
@@ -142,6 +140,7 @@ int drm_takedown( drm_device_t *dev )
 	if (dev->driver->pretakedown)
 		dev->driver->pretakedown(dev);
+	DRM_DEBUG("driver pretakedown completed\n");
 	if (dev->unique) {
 		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -178,11 +177,16 @@ int drm_takedown( drm_device_t *dev )
 		}
 		dev->agp->memory = NULL;
-		if ( dev->agp->acquired ) drm_agp_do_release(dev);
+		if (dev->agp->acquired)
+			drm_agp_release(dev);
 		dev->agp->acquired = 0;
 		dev->agp->enabled = 0;
 	}
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}
 	/* Clear vma list (only built for debugging) */
 	if ( dev->vmalist ) {
@@ -194,48 +198,11 @@ int drm_takedown( drm_device_t *dev )
 	}
 	if( dev->maplist ) {
-		list_for_each_safe( list, list_next, &dev->maplist->head ) {
-			r_list = (drm_map_list_t *)list;
-
-			if ( ( map = r_list->map ) ) {
-				switch ( map->type ) {
-				case _DRM_REGISTERS:
-				case _DRM_FRAME_BUFFER:
-					if (drm_core_has_MTRR(dev)) {
-						if ( map->mtrr >= 0 ) {
-							int retcode;
-							retcode = mtrr_del( map->mtrr,
-									    map->offset,
-									    map->size );
-							DRM_DEBUG( "mtrr_del=%d\n", retcode );
-						}
-					}
-					drm_ioremapfree( map->handle, map->size, dev );
-					break;
-				case _DRM_SHM:
-					vfree(map->handle);
-					break;
-				case _DRM_AGP:
-					/* Do nothing here, because this is all
-					 * handled in the AGP/GART driver.
-					 */
-					break;
-				case _DRM_SCATTER_GATHER:
-					/* Handle it */
-					if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
-						drm_sg_cleanup(dev->sg);
-						dev->sg = NULL;
-					}
-					break;
-				}
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-			}
-			list_del( list );
-			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+		while (!list_empty(&dev->maplist->head)) {
+			struct list_head *list = dev->maplist->head.next;
+			r_list = list_entry(list, drm_map_list_t, head);
+			drm_rmmap_locked(dev, r_list->map);
 		}
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		dev->maplist = NULL;
 	}
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
@@ -264,6 +231,7 @@ int drm_takedown( drm_device_t *dev )
 	}
 	up( &dev->struct_sem );
+	DRM_DEBUG("takedown completed\n");
 	return 0;
 }
@@ -312,7 +280,7 @@ EXPORT_SYMBOL(drm_init);
  *
  * Cleans up all DRM device, calling takedown().
  *
- * \sa drm_init().
+ * \sa drm_init
  */
 static void drm_cleanup( drm_device_t *dev )
 {
@@ -325,6 +293,11 @@ static void drm_cleanup( drm_device_t *dev )
 	drm_takedown( dev );
+	if (dev->maplist) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		dev->maplist = NULL;
+	}
+
 	drm_ctxbitmap_cleanup( dev );
 	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -71,12 +71,6 @@ static int drm_setup( drm_device_t *dev )
 		dev->magiclist[i].tail = NULL;
 	}
-	dev->maplist = drm_alloc(sizeof(*dev->maplist),
-				 DRM_MEM_MAPS);
-	if(dev->maplist == NULL) return -ENOMEM;
-	memset(dev->maplist, 0, sizeof(*dev->maplist));
-	INIT_LIST_HEAD(&dev->maplist->head);
-
 	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
 				 DRM_MEM_CTXLIST);
 	if(dev->ctxlist == NULL) return -ENOMEM;
@@ -208,7 +208,7 @@ int drm_getmap( struct inode *inode, struct file *filp,
 	map.size = r_list->map->size;
 	map.type = r_list->map->type;
 	map.flags = r_list->map->flags;
-	map.handle = r_list->map->handle;
+	map.handle = (void *)(unsigned long) r_list->user_token;
 	map.mtrr = r_list->map->mtrr;
 	up(&dev->struct_sem);
@@ -142,27 +142,31 @@ void drm_free_pages(unsigned long address, int order, int area)
 #if __OS_HAS_AGP
 /** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
 {
-	return drm_agp_allocate_memory(bridge, pages, type);
+	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
 }
+EXPORT_SYMBOL(drm_alloc_agp);

 /** Wrapper around agp_free_memory() */
 int drm_free_agp(DRM_AGP_MEM *handle, int pages)
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);

 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
 {
 	return drm_agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_bind_agp);

 /** Wrapper around agp_unbind_memory() */
 int drm_unbind_agp(DRM_AGP_MEM *handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 #endif /* agp */
 #endif /* debug_memory */
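With all four wrappers now exported, a driver can pair them directly. A minimal sketch using the signatures above (the page count, type and bind offset are hypothetical, error handling elided):

	DRM_AGP_MEM *mem = drm_alloc_agp(dev, pages, type);
	if (mem != NULL) {
		drm_bind_agp(mem, start);     /* start: page offset into the aperture */
		/* ... use the memory ... */
		drm_unbind_agp(mem);
		drm_free_agp(mem, pages);
	}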
@@ -46,11 +46,11 @@
 /**
  * \brief Allocate a PCI consistent memory block, for DMA.
  */
-void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
-		    dma_addr_t maxaddr, dma_addr_t * busaddr)
+drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+				dma_addr_t maxaddr)
 {
-	void *address;
-#if DRM_DEBUG_MEMORY
+	drm_dma_handle_t *dmah;
+#ifdef DRM_DEBUG_MEMORY
 	int area = DRM_MEM_DMA;

 	spin_lock(&drm_mem_lock);
@@ -74,13 +74,19 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
 		return NULL;
 	}
-	address = pci_alloc_consistent(dev->pdev, size, busaddr);
-#if DRM_DEBUG_MEMORY
-	if (address == NULL) {
+	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+	if (!dmah)
+		return NULL;
+
+	dmah->size = size;
+	dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
+
+#ifdef DRM_DEBUG_MEMORY
+	if (dmah->vaddr == NULL) {
 		spin_lock(&drm_mem_lock);
 		++drm_mem_stats[area].fail_count;
 		spin_unlock(&drm_mem_lock);
+		kfree(dmah);
 		return NULL;
 	}
@@ -90,37 +96,42 @@
 	drm_ram_used += size;
 	spin_unlock(&drm_mem_lock);
 #else
-	if (address == NULL)
+	if (dmah->vaddr == NULL) {
+		kfree(dmah);
 		return NULL;
+	}
 #endif
-	memset(address, 0, size);
-	return address;
+	memset(dmah->vaddr, 0, size);
+	return dmah;
 }
 EXPORT_SYMBOL(drm_pci_alloc);

 /**
- * \brief Free a PCI consistent memory block.
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
  */
 void
-drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
+__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
 {
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
 	int area = DRM_MEM_DMA;
 	int alloc_count;
 	int free_count;
 #endif
-	if (!vaddr) {
-#if DRM_DEBUG_MEMORY
+	if (!dmah->vaddr) {
+#ifdef DRM_DEBUG_MEMORY
 		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
 #endif
 	} else {
-		pci_free_consistent(dev->pdev, size, vaddr, busaddr);
+		pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
+				    dmah->busaddr);
 	}
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
 	spin_lock(&drm_mem_lock);
 	free_count = ++drm_mem_stats[area].free_count;
 	alloc_count = drm_mem_stats[area].succeed_count;
@@ -135,6 +146,16 @@ drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
 #endif
 }

+/**
+ * \brief Free a PCI consistent memory block
+ */
+void
+drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+{
+	__drm_pci_free(dev, dmah);
+	kfree(dmah);
+}
 EXPORT_SYMBOL(drm_pci_free);

 /*@}*/
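A minimal sketch of the reworked pairing from a driver's point of view: one descriptor now carries vaddr, busaddr and size together (the size/alignment/maxaddr values here are hypothetical):

	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
	if (!dmah)
		return -ENOMEM;
	/* ... dmah->vaddr for the CPU, dmah->busaddr for the device ... */
	drm_pci_free(dev, dmah);	/* frees both the block and the descriptor */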
@@ -25,6 +25,8 @@
 	{0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
 	{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
 	{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
+	{0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
+	{0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
 	{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
@@ -33,7 +35,17 @@
 	{0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
 	{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
 	{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
 	{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -56,6 +68,7 @@
 	{0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
 	{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
 	{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -116,9 +129,10 @@
 	{0, 0, 0}

 #define mga_PCI_IDS \
-	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
 	{0, 0, 0}
 #define mach64_PCI_IDS \
@@ -162,9 +176,10 @@
 #define viadrv_PCI_IDS \
 	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 #define i810_PCI_IDS \
@@ -181,33 +196,30 @@
 	{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}

-#define gamma_PCI_IDS \
-	{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
-
 #define savage_PCI_IDS \
-	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
 	{0, 0, 0}
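The last field of each entry lands in pci_device_id::driver_data, which is how the savage and mga drivers can recover the chip family at probe time. A hedged sketch (the helper name is hypothetical; the field layout follows struct pci_device_id):

	/* Hypothetical illustration -- not part of this patch. */
	static int savage_chip_family(const struct pci_device_id *ent)
	{
		return (int)ent->driver_data;	/* e.g. S3_SAVAGE4 */
	}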
 #define ffb_PCI_IDS \
@@ -223,10 +235,3 @@
 	{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
-
-#define viadrv_PCI_IDS \
-	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
@@ -210,8 +210,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 	/* Hardcoded from _DRM_FRAME_BUFFER,
 	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-	   _DRM_SCATTER_GATHER. */
-	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
+	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
 	const char *type;
 	int i;
@@ -229,16 +229,19 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 	if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
-		if(!map) continue;
-		if (map->type < 0 || map->type > 4) type = "??";
-		else type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+		if(!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
 			       i,
 			       map->offset,
 			       map->size,
 			       type,
 			       map->flags,
-			       (unsigned long)map->handle);
+			       r_list->user_token);
 		if (map->mtrr < 0) {
 			DRM_PROC_PRINT("none\n");
 		} else {
@@ -61,6 +61,12 @@ void drm_sg_cleanup( drm_sg_mem_t *entry )
 		   DRM_MEM_SGLISTS );
 }

+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
 int drm_sg_alloc( struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg )
 {
@@ -133,12 +139,13 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
 	 */
 	memset( entry->virtual, 0, pages << PAGE_SHIFT );

-	entry->handle = (unsigned long)entry->virtual;
+	entry->handle = ScatterHandle((unsigned long)entry->virtual);

 	DRM_DEBUG( "sg alloc handle  = %08lx\n", entry->handle );
 	DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );

-	for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
+	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+	     i += PAGE_SIZE, j++) {
 		entry->pagelist[j] = vmalloc_to_page((void *)i);
 		if (!entry->pagelist[j])
 			goto failed;
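ScatterHandle() exists because on an _LP64 kernel the vmalloc address used as the scatter-gather handle no longer fits in the 32 bits that reach userspace; the macro folds the two halves together instead of silently truncating. For illustration, with a made-up address:

	/* x = 0x0000000100000004:
	 *   x >> 32               == 0x00000001
	 *   x & ((1L << 32) - 1)  == 0x00000004
	 *   ScatterHandle(x)      == 0x00000005
	 */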
@@ -75,6 +75,11 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
 	dev->pci_func = PCI_FUNC(pdev->devfn);
 	dev->irq = pdev->irq;

+	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
+	if (dev->maplist == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev->maplist->head);
+
 	/* the DRM has 6 basic counters */
 	dev->counters = 6;
 	dev->types[0] = _DRM_STAT_LOCK;
@@ -91,6 +96,7 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
 		goto error_out_unreg;

 	if (drm_core_has_AGP(dev)) {
+		if (drm_device_is_agp(dev))
 			dev->agp = drm_agp_init(dev);
 		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
 			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
@@ -73,12 +73,13 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
 		if (!map) continue;
-		if (map->offset == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
 	}

 	if (map && map->type == _DRM_AGP) {
 		unsigned long offset = address - vma->vm_start;
-		unsigned long baddr = VM_OFFSET(vma) + offset;
+		unsigned long baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
@@ -210,6 +211,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 	}

 	if(!found_maps) {
+		drm_dma_handle_t dmah;
+
 		switch (map->type) {
 		case _DRM_REGISTERS:
 		case _DRM_FRAME_BUFFER:
@@ -228,6 +231,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		case _DRM_AGP:
 		case _DRM_SCATTER_GATHER:
 			break;
+		case _DRM_CONSISTENT:
+			dmah.vaddr = map->handle;
+			dmah.busaddr = map->offset;
+			dmah.size = map->size;
+			__drm_pci_free(dev, &dmah);
+			break;
 		}
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 	}
@@ -296,7 +305,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 	offset = address - vma->vm_start;
-	map_offset = map->offset - dev->sg->handle;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
@@ -305,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 }

-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
-
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
 				  unsigned long address,
 				  int *type) {
@@ -335,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 	return drm_do_vm_sg_nopage(vma, address);
 }

-#else	/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
-
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address,
-				  int unused) {
-	return drm_do_vm_nopage(vma, address);
-}
-
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_shm_nopage(vma, address);
-}
-
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_dma_nopage(vma, address);
-}
-
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int unused) {
-	return drm_do_vm_sg_nopage(vma, address);
-}
-
-#endif
-
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
 	.nopage = drm_vm_nopage,
@@ -487,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_ops = &drm_vm_dma_ops;

-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM;	/* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
-#endif

 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
@@ -560,13 +534,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 	   for performance, even if the list was a
 	   bit longer. */
 	list_for_each(list, &dev->maplist->head) {
-		unsigned long off;
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
 		if (!map) continue;
-		off = dev->driver->get_map_ofs(map);
-		if (off == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
 	}

 	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@@ -605,17 +578,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-		if (VM_OFFSET(vma) >= __pa(high_memory)) {
 #if defined(__i386__) || defined(__x86_64__)
 		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
 			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
 			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 		}
 #elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+		if (map->type == _DRM_REGISTERS)
+			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
 #endif
 		vma->vm_flags |= VM_IO;	/* not in core dump */
-		}
 #if defined(__ia64__)
 		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 				    vma->vm_start))
@@ -628,12 +601,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		offset = dev->driver->get_reg_ofs(dev);
 #ifdef __sparc__
 		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
-				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 #else
 		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 #endif
@@ -641,37 +614,28 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%lx\n",
 			  map->type,
-			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
+			  vma->vm_start, vma->vm_end, map->offset + offset);
 		vma->vm_ops = &drm_vm_ops;
 		break;
 	case _DRM_SHM:
+	case _DRM_CONSISTENT:
+		/* Consistent memory is really like shared memory. It's only
+		 * allocated in a different way. */
 		vma->vm_ops = &drm_vm_shm_ops;
 		vma->vm_private_data = (void *)map;
 		/* Don't let this area swap.  Change when
 		   DRM_KERNEL advisory is supported. */
-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
 		vma->vm_private_data = (void *)map;
-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM;	/* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
-#endif

 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
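All of these drm_vm.c hunks implement the same idea: userspace identifies a map by the 32-bit user_token it was handed, passes it as the mmap offset, and the kernel translates it back to the real map->offset. A hypothetical userspace sketch, assuming fd is an open DRM node and map was filled in by a map-lookup ioctl:

	/* map.handle now carries r_list->user_token rather than a raw
	 * kernel offset, so it is what gets passed as the mmap offset. */
	void *ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, (off_t)(unsigned long)map.handle);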
@@ -152,14 +152,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
 		return NULL;

 	list_for_each(list, &dev->maplist->head) {
-		unsigned long uoff;
 		r_list = (drm_map_list_t *)list;
 		map = r_list->map;
 		if (!map)
 			continue;
-		uoff = (map->offset & 0xffffffff);
-		if (uoff == off)
+		if (r_list->user_token == off)
 			return map;
 	}
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/
/* ================================================================
* Old-style context support -- only used by gamma.
*/
/* The drm_read and drm_write_string code (especially that which manages
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int left;
int avail;
int send;
int cur;
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
interruptible_sleep_on(&dev->buf_readers);
if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n");
return -ERESTARTSYS;
}
DRM_DEBUG(" awake\n");
}
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
send = DRM_MIN(avail, count);
while (send) {
if (dev->buf_wp > dev->buf_rp) {
cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
if (copy_to_user(buf, dev->buf_rp, cur))
return -EFAULT;
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;
}
wake_up_interruptible(&dev->buf_writers);
return DRM_MIN(avail, count);
}
/* In an incredibly convoluted setup, the kernel module actually calls
* back into the X server to perform context switches on behalf of the
* 3d clients.
*/
int DRM(write_string)(drm_device_t *dev, const char *s)
{
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
int send = strlen(s);
int count;
DRM_DEBUG("%d left, %d to send (%p, %p)\n",
left, send, dev->buf_rp, dev->buf_wp);
if (left == 1 || dev->buf_wp != dev->buf_rp) {
DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
left,
dev->buf_wp,
dev->buf_rp);
}
while (send) {
if (dev->buf_wp >= dev->buf_rp) {
count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
if (count == left) --count; /* Leave a hole */
} else {
count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
}
strncpy(dev->buf_wp, s, count);
dev->buf_wp += count;
if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
send -= count;
}
if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
}
unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
poll_wait(filp, &dev->buf_readers, wait);
if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
return 0;
}
int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
char buf[64];
drm_queue_t *q;
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new >= dev->queue_count) {
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
q = dev->queuelist[new];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
atomic_dec(&q->use_count);
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
/* This causes the X server to wake up & do a bunch of hardware
* interaction to actually effect the context switch.
*/
sprintf(buf, "C %d %d\n", old, new);
DRM(write_string)(dev, buf);
atomic_dec(&q->use_count);
return 0;
}
int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
drm_device_dma_t *dma = dev->dma;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("Cannot free lock\n");
}
}
clear_bit(0, &dev->context_flag);
wake_up_interruptible(&dev->context_wait);
return 0;
}
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
DRM_DEBUG("\n");
if (atomic_read(&q->use_count) != 1
|| atomic_read(&q->finalization)
|| atomic_read(&q->block_count)) {
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count));
}
atomic_set(&q->finalization, 0);
atomic_set(&q->block_count, 0);
atomic_set(&q->block_read, 0);
atomic_set(&q->block_write, 0);
atomic_set(&q->total_queued, 0);
atomic_set(&q->total_flushed, 0);
atomic_set(&q->total_locks, 0);
init_waitqueue_head(&q->write_queue);
init_waitqueue_head(&q->read_queue);
init_waitqueue_head(&q->flush_queue);
q->flags = ctx->flags;
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
return 0;
}
/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
disappear (so all deallocation must be done after IOCTLs are off)
2) dev->queue_count < dev->queue_slots
3) dev->queuelist[i].use_count == 0 and
dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
2) dev->queue_count < dev->queue_slots */
static int DRM(alloc_queue)(drm_device_t *dev)
{
int i;
drm_queue_t *queue;
int oldslots;
int newslots;
/* Check for a free queue */
for (i = 0; i < dev->queue_count; i++) {
atomic_inc(&dev->queuelist[i]->use_count);
if (atomic_read(&dev->queuelist[i]->use_count) == 1
&& !atomic_read(&dev->queuelist[i]->finalization)) {
DRM_DEBUG("%d (free)\n", i);
return i;
}
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
down(&dev->struct_sem);
queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
++dev->queue_count;
if (dev->queue_count >= dev->queue_slots) {
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
if (!dev->queue_slots) dev->queue_slots = 1;
dev->queue_slots *= 2;
newslots = dev->queue_slots * sizeof(*dev->queuelist);
dev->queuelist = DRM(realloc)(dev->queuelist,
oldslots,
newslots,
DRM_MEM_QUEUES);
if (!dev->queuelist) {
up(&dev->struct_sem);
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
}
dev->queuelist[dev->queue_count-1] = queue;
up(&dev->struct_sem);
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
return dev->queue_count - 1;
}
int DRM(resctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_ctx_res_t __user *argp = (void __user *)arg;
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
if (copy_from_user(&res, argp, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if (copy_to_user(argp, &res, sizeof(res)))
return -EFAULT;
return 0;
}
int DRM(addctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_ctx_t __user *argp = (void __user *)arg;
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
ctx.handle = DRM(alloc_queue)(dev);
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(modctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
if (DRM_BUFCOUNT(&q->waitlist)) {
atomic_dec(&q->use_count);
return -EBUSY;
}
q->flags = ctx.flags;
atomic_dec(&q->use_count);
return 0;
}
int DRM(getctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
drm_queue_t *q;
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
ctx.flags = q->flags;
atomic_dec(&q->use_count);
if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(switchctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}
int DRM(newctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
return 0;
}
int DRM(rmctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
drm_buf_t *buf;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
atomic_inc(&q->finalization); /* Mark queue in finalization state */
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
finalization) */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
/* Remove queued buffers */
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
DRM(free_buffer)(dev, buf);
}
clear_bit(0, &dev->interrupt_flag);
/* Wakeup blocked processes */
wake_up_interruptible(&q->read_queue);
wake_up_interruptible(&q->write_queue);
wake_up_interruptible(&q->flush_queue);
/* Finalization over. Queue is made
available when both use_count and
finalization become 0, which won't
happen until all the waiting processes
stop waiting. */
atomic_dec(&q->finalization);
return 0;
}
/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
unsigned long length)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
mb();
while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
cpu_relax();
GAMMA_WRITE(GAMMA_DMAADDRESS, address);
while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
cpu_relax();
GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
void gamma_dma_quiescent_single(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
cpu_relax();
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
cpu_relax();
GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
GAMMA_WRITE(GAMMA_SYNC, 0);
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
cpu_relax();
} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
cpu_relax();
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
GAMMA_WRITE(GAMMA_SYNC, 0);
/* Read from first MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
cpu_relax();
} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
/* Read from second MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
cpu_relax();
} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}
void gamma_dma_ready(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
cpu_relax();
}
static inline int gamma_dma_is_ready(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
return (!GAMMA_READ(GAMMA_DMACOUNT));
}
irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
{
drm_device_t *dev = (drm_device_t *)arg;
drm_device_dma_t *dma = dev->dma;
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
/* FIXME: should check whether we're actually interested in the interrupt? */
atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
if (gamma_dma_is_ready(dev)) {
/* Free previous buffer */
if (test_and_set_bit(0, &dev->dma_flag))
return IRQ_HANDLED;
if (dma->this_buffer) {
gamma_free_buffer(dev, dma->this_buffer);
dma->this_buffer = NULL;
}
clear_bit(0, &dev->dma_flag);
/* Dispatch new buffer */
schedule_work(&dev->work);
}
return IRQ_HANDLED;
}
/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
unsigned long address;
unsigned long length;
drm_buf_t *buf;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
if (!dma->next_buffer) {
DRM_ERROR("No next_buffer\n");
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
buf = dma->next_buffer;
/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
/* So we pass the buffer index value into the physical page offset */
address = buf->idx << 12;
length = buf->used;
DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
buf->context, buf->idx, length);
if (buf->list == DRM_LIST_RECLAIM) {
gamma_clear_next_buffer(dev);
gamma_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
if (!length) {
DRM_ERROR("0 length buffer\n");
gamma_clear_next_buffer(dev);
gamma_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return 0;
}
if (!gamma_dma_is_ready(dev)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
if (buf->while_locked) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Dispatching buffer %d from pid %d"
" \"while locked\", but no lock held\n",
buf->idx, current->pid);
}
} else {
if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
}
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
/* PRE: dev->last_context != buf->context */
if (DRM(context_switch)(dev, dev->last_context,
buf->context)) {
DRM(clear_next_buffer)(dev);
DRM(free_buffer)(dev, buf);
}
retcode = -EBUSY;
goto cleanup;
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
}
gamma_clear_next_buffer(dev);
buf->pending = 1;
buf->waiting = 0;
buf->list = DRM_LIST_PEND;
/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
address = buf->idx << 12;
gamma_dma_dispatch(dev, address, length);
gamma_free_buffer(dev, dma->this_buffer);
dma->this_buffer = buf;
atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
if (!buf->while_locked && !dev->context_flag && !locked) {
if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
cleanup:
clear_bit(0, &dev->dma_flag);
return retcode;
}
static void gamma_dma_timer_bh(unsigned long dev)
{
gamma_dma_schedule((drm_device_t *)dev, 0);
}
void gamma_irq_immediate_bh(void *dev)
{
gamma_dma_schedule(dev, 0);
}
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
int next;
drm_queue_t *q;
drm_buf_t *buf;
int retcode = 0;
int processed = 0;
int missed;
int expire = 20;
drm_device_dma_t *dma = dev->dma;
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
return -EBUSY;
}
missed = atomic_read(&dev->counts[10]);
again:
if (dev->context_flag) {
clear_bit(0, &dev->interrupt_flag);
return -EBUSY;
}
if (dma->next_buffer) {
/* Unsent buffer that was previously
selected, but that couldn't be sent
because the lock could not be obtained
or the DMA engine wasn't ready. Try
again. */
if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
} else {
do {
next = gamma_select_queue(dev, gamma_dma_timer_bh);
if (next >= 0) {
q = dev->queuelist[next];
buf = gamma_waitlist_get(&q->waitlist);
dma->next_buffer = buf;
dma->next_queue = q;
if (buf && buf->list == DRM_LIST_RECLAIM) {
gamma_clear_next_buffer(dev);
gamma_free_buffer(dev, buf);
}
}
} while (next >= 0 && !dma->next_buffer);
if (dma->next_buffer) {
if (!(retcode = gamma_do_dma(dev, locked))) {
++processed;
}
}
}
if (--expire) {
if (missed != atomic_read(&dev->counts[10])) {
if (gamma_dma_is_ready(dev)) goto again;
}
if (processed && gamma_dma_is_ready(dev)) {
processed = 0;
goto again;
}
}
clear_bit(0, &dev->interrupt_flag);
return retcode;
}
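/* Retry note (editor's addition): the 'again' loop re-runs the scheduler
 * whenever another caller bounced off interrupt_flag since we started
 * (counts[10] moved) or we just dispatched a buffer and the engine is
 * ready for more.  'expire' caps this at roughly twenty passes so the
 * bottom half cannot spin indefinitely on a steady stream of ready
 * buffers.
 */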
static int gamma_dma_priority(struct file *filp,
drm_device_t *dev, drm_dma_t *d)
{
unsigned long address;
unsigned long length;
int must_free = 0;
int retcode = 0;
int i;
int idx;
drm_buf_t *buf;
drm_buf_t *last_buf = NULL;
drm_device_dma_t *dma = dev->dma;
int *send_indices = NULL;
int *send_sizes = NULL;
DECLARE_WAITQUEUE(entry, current);
/* Turn off interrupt handling */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) return -EINTR;
}
if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
while (!gamma_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
++must_free;
}
	send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
				  DRM_MEM_DRIVER);
	if (send_indices == NULL) {
		/* Bail through cleanup: a bare return here would leak the
		 * hardware lock and leave interrupt_flag set. */
		retcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(send_indices, d->send_indices,
			   d->send_count * sizeof(*send_indices))) {
		retcode = -EFAULT;
		goto cleanup;
	}
	send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
				DRM_MEM_DRIVER);
	if (send_sizes == NULL) {
		retcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(send_sizes, d->send_sizes,
			   d->send_count * sizeof(*send_sizes))) {
		retcode = -EFAULT;
		goto cleanup;
	}
for (i = 0; i < d->send_count; i++) {
idx = send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
send_indices[i], dma->buf_count - 1);
continue;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
retcode = -EINVAL;
goto cleanup;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using buffer on list %d\n",
current->pid, buf->list);
retcode = -EINVAL;
goto cleanup;
}
/* This isn't a race condition on
buf->list, since our concern is the
buffer reclaim during the time the
process closes the /dev/drm? handle, so
it can't also be doing DMA. */
buf->list = DRM_LIST_PRIO;
buf->used = send_sizes[i];
buf->context = d->context;
buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
address = (unsigned long)buf->address;
length = buf->used;
if (!length) {
DRM_ERROR("0 length buffer\n");
}
if (buf->pending) {
DRM_ERROR("Sending pending buffer:"
" buffer %d, offset %d\n",
send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
if (buf->waiting) {
DRM_ERROR("Sending waiting buffer:"
" buffer %d, offset %d\n",
send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
buf->pending = 1;
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != buf->context */
DRM(context_switch)(dev, dev->last_context,
buf->context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
retcode = -EINTR;
goto cleanup;
}
if (dev->last_context != buf->context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context,
buf->context);
}
}
gamma_dma_dispatch(dev, address, length);
atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
if (last_buf) {
gamma_free_buffer(dev, last_buf);
}
last_buf = buf;
}
cleanup:
if (last_buf) {
gamma_dma_ready(dev);
gamma_free_buffer(dev, last_buf);
}
if (send_indices)
DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
DRM_MEM_DRIVER);
if (send_sizes)
DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
DRM_MEM_DRIVER);
if (must_free && !dev->context_flag) {
if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
clear_bit(0, &dev->interrupt_flag);
return retcode;
}
static int gamma_dma_send_buffers(struct file *filp,
drm_device_t *dev, drm_dma_t *d)
{
DECLARE_WAITQUEUE(entry, current);
drm_buf_t *last_buf = NULL;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
int send_index;
if (get_user(send_index, &d->send_indices[d->send_count-1]))
return -EFAULT;
if (d->flags & _DRM_DMA_BLOCK) {
last_buf = dma->buflist[send_index];
add_wait_queue(&last_buf->dma_wait, &entry);
}
if ((retcode = gamma_dma_enqueue(filp, d))) {
if (d->flags & _DRM_DMA_BLOCK)
remove_wait_queue(&last_buf->dma_wait, &entry);
return retcode;
}
gamma_dma_schedule(dev, 0);
if (d->flags & _DRM_DMA_BLOCK) {
DRM_DEBUG("%d waiting\n", current->pid);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!last_buf->waiting && !last_buf->pending)
break; /* finished */
schedule();
if (signal_pending(current)) {
retcode = -EINTR; /* Can't restart */
break;
}
}
current->state = TASK_RUNNING;
DRM_DEBUG("%d running\n", current->pid);
remove_wait_queue(&last_buf->dma_wait, &entry);
if (!retcode
|| (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
if (!waitqueue_active(&last_buf->dma_wait)) {
gamma_free_buffer(dev, last_buf);
}
}
if (retcode) {
DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
d->context,
last_buf->waiting,
last_buf->pending,
(long)DRM_WAITCOUNT(dev, d->context),
last_buf->idx,
last_buf->list,
current->pid);
}
}
return retcode;
}
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
drm_dma_t __user *argp = (void __user *)arg;
drm_dma_t d;
if (copy_from_user(&d, argp, sizeof(d)))
return -EFAULT;
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
current->pid, d.send_count, dma->buf_count);
return -EINVAL;
}
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
current->pid, d.request_count, dma->buf_count);
return -EINVAL;
}
if (d.send_count) {
if (d.flags & _DRM_DMA_PRIORITY)
retcode = gamma_dma_priority(filp, dev, &d);
else
retcode = gamma_dma_send_buffers(filp, dev, &d);
}
d.granted_count = 0;
if (!retcode && d.request_count) {
retcode = gamma_dma_get_buffers(filp, &d);
}
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
if (copy_to_user(argp, &d, sizeof(d)))
return -EFAULT;
return retcode;
}
/* =============================================================
* DMA initialization, cleanup
*/
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
drm_gamma_private_t *dev_priv;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
int i;
struct list_head *list;
unsigned long *pgt;
DRM_DEBUG( "%s\n", __FUNCTION__ );
dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
DRM_MEM_DRIVER );
if ( !dev_priv )
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
dev_priv->num_rast = init->num_rast;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea = r_list->map;
break;
}
}
dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
dev_priv->sarea_priv = (drm_gamma_sarea_t *)
((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
if (init->pcimode) {
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
pgt = buf->address;
for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
buf = dma->buflist[i];
*pgt = virt_to_phys((void*)buf->address) | 0x07;
pgt++;
}
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
} else {
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
drm_core_ioremap( dev->agp_buffer_map, dev);
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
pgt = buf->address;
for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
buf = dma->buflist[i];
*pgt = (unsigned long)buf->address + 0x07;
pgt++;
}
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
			cpu_relax();
		GAMMA_WRITE(GAMMA_GDMACONTROL, 0xe);
}
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();
GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
return 0;
}
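/* Addressing note (editor's sketch, not part of the original source): the
 * loops above build a one-page page table in buffer GLINT_DRI_BUF_COUNT,
 * entry i pointing at the page backing DMA buffer i (the low 0x07 bits
 * carry flag bits).  Once GAMMA_PAGETABLEADDR is loaded, the engine sees
 * buffer i at logical address (i << 12), which is why gamma_do_dma()
 * dispatches with address = buf->idx << 12 instead of the buffer's kernel
 * address; buffer 3, for example, always sits at logical 0x3000.
 */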
int gamma_do_cleanup_dma( drm_device_t *dev )
{
DRM_DEBUG( "%s\n", __FUNCTION__ );
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
if ( dev->irq_enabled )
DRM(irq_uninstall)(dev);
if ( dev->dev_private ) {
if ( dev->agp_buffer_map != NULL )
drm_core_ioremapfree( dev->agp_buffer_map, dev );
DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
DRM_MEM_DRIVER );
dev->dev_private = NULL;
}
return 0;
}
int gamma_dma_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_gamma_init_t init;
LOCK_TEST_WITH_RETURN( dev, filp );
if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
return -EFAULT;
switch ( init.func ) {
case GAMMA_INIT_DMA:
return gamma_do_init_dma( dev, &init );
case GAMMA_CLEANUP_DMA:
return gamma_do_cleanup_dma( dev );
}
return -EINVAL;
}
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
drm_device_dma_t *dma = dev->dma;
unsigned int *screenbuf;
DRM_DEBUG( "%s\n", __FUNCTION__ );
/* We've DRM_RESTRICTED this DMA buffer */
screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
#if 0
*buffer++ = 0x180; /* Tag (FilterMode) */
*buffer++ = 0x200; /* Allow FBColor through */
*buffer++ = 0x53B; /* Tag */
*buffer++ = copy->Pitch;
*buffer++ = 0x53A; /* Tag */
*buffer++ = copy->SrcAddress;
*buffer++ = 0x539; /* Tag */
*buffer++ = copy->WidthHeight; /* Initiates transfer */
*buffer++ = 0x53C; /* Tag - DMAOutputAddress */
*buffer++ = virt_to_phys((void*)screenbuf);
*buffer++ = 0x53D; /* Tag - DMAOutputCount */
*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
/* Now put it back to the screen */
*buffer++ = 0x180; /* Tag (FilterMode) */
*buffer++ = 0x400; /* Allow Sync through */
*buffer++ = 0x538; /* Tag - DMARectangleReadTarget */
*buffer++ = 0x155; /* FBSourceData | count */
*buffer++ = 0x537; /* Tag */
*buffer++ = copy->Pitch;
*buffer++ = 0x536; /* Tag */
*buffer++ = copy->DstAddress;
*buffer++ = 0x535; /* Tag */
*buffer++ = copy->WidthHeight; /* Initiates transfer */
*buffer++ = 0x530; /* Tag - DMAAddr */
*buffer++ = virt_to_phys((void*)screenbuf);
*buffer++ = 0x531;
*buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif
/* need to dispatch it now */
return 0;
}
int gamma_dma_copy( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_gamma_copy_t copy;
if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
return -EFAULT;
return gamma_do_copy_dma( dev, &copy );
}
/* =============================================================
* Per Context SAREA Support
*/
int gamma_getsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
	if (request.ctx_id >= (unsigned) dev->max_context) {	/* unsigned compare: a huge ctx_id must not wrap negative */
up(&dev->struct_sem);
return -EINVAL;
}
map = dev->context_sareas[request.ctx_id];
up(&dev->struct_sem);
request.handle = map->handle;
if (copy_to_user(argp, &request, sizeof(request)))
return -EFAULT;
return 0;
}
int gamma_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
struct list_head *list;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *)arg,
sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
r_list = NULL;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
if(r_list->map &&
r_list->map->handle == request.handle) break;
}
if (list == &(dev->maplist->head)) {
up(&dev->struct_sem);
return -EINVAL;
}
map = r_list->map;
up(&dev->struct_sem);
if (!map) return -EINVAL;
down(&dev->struct_sem);
	if (request.ctx_id >= (unsigned) dev->max_context) {	/* unsigned compare: a huge ctx_id must not wrap negative */
up(&dev->struct_sem);
return -EINVAL;
}
dev->context_sareas[request.ctx_id] = map;
up(&dev->struct_sem);
return 0;
}
void gamma_driver_irq_preinstall( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
cpu_relax();
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
}
void gamma_driver_irq_postinstall( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
}
void gamma_driver_irq_uninstall( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
if (!dev_priv)
return;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
}
extern drm_ioctl_desc_t DRM(ioctls)[];
static int gamma_driver_preinit(drm_device_t *dev)
{
/* reset the finish ioctl */
DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
return 0;
}
static void gamma_driver_pretakedown(drm_device_t *dev)
{
gamma_do_cleanup_dma(dev);
}
static void gamma_driver_dma_ready(drm_device_t *dev)
{
gamma_dma_ready(dev);
}
static int gamma_driver_dma_quiescent(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	if (dev_priv->num_rast == 2)
		gamma_dma_quiescent_dual(dev);
	else
		gamma_dma_quiescent_single(dev);
return 0;
}
void gamma_driver_register_fns(drm_device_t *dev)
{
dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
DRM(fops).read = gamma_fops_read;
DRM(fops).poll = gamma_fops_poll;
dev->driver.preinit = gamma_driver_preinit;
dev->driver.pretakedown = gamma_driver_pretakedown;
dev->driver.dma_ready = gamma_driver_dma_ready;
dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
dev->driver.dma_flush_unblock = gamma_flush_unblock;
}
#ifndef _GAMMA_DRM_H_
#define _GAMMA_DRM_H_
typedef struct _drm_gamma_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_gamma_tex_region_t;
typedef struct {
unsigned int GDeltaMode;
unsigned int GDepthMode;
unsigned int GGeometryMode;
unsigned int GTransformMode;
} drm_gamma_context_regs_t;
typedef struct _drm_gamma_sarea {
drm_gamma_context_regs_t context_state;
unsigned int dirty;
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
* age different to the one you set, then you are mistaken and
* it has been stolen by another client. If global texAge
* hasn't changed, there is no need to walk the list.
*
* These regions can be used as a proxy for the fine-grained
* texture information of other clients - by maintaining them
* in the same lru which is used to age their own textures,
* clients have an approximate lru for the whole of global
* texture space, and can make informed decisions as to which
* areas to kick out. There is no need to choose whether to
* kick out your own texture or someone else's - simply eject
* them all in LRU order.
*/
#define GAMMA_NR_TEX_REGIONS 64
drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1];
	/* Last element is the sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
} drm_gamma_sarea_t;
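/* Editor's sketch of the eviction walk described above; not part of the
 * original header.  kick_region() is hypothetical, as is the assumption
 * that the sentinel's prev link points at the least-recently-used region
 * (the real convention lives in the user-space 3D driver):
 *
 *	static void reclaim_texture_space(drm_gamma_sarea_t *sarea, int needed)
 *	{
 *		int i = sarea->texList[GAMMA_NR_TEX_REGIONS].prev;
 *
 *		while (needed > 0 && i != GAMMA_NR_TEX_REGIONS) {
 *			int prev = sarea->texList[i].prev;
 *			needed -= kick_region(sarea, i);    eject in LRU order
 *			i = prev;
 *		}
 *	}
 */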
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmGamma.h)
*/
/* Gamma specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
typedef struct drm_gamma_copy {
unsigned int DMAOutputAddress;
unsigned int DMAOutputCount;
unsigned int DMAReadGLINTSource;
unsigned int DMARectangleWriteAddress;
unsigned int DMARectangleWriteLinePitch;
unsigned int DMARectangleWrite;
unsigned int DMARectangleReadAddress;
unsigned int DMARectangleReadLinePitch;
unsigned int DMARectangleRead;
unsigned int DMARectangleReadTarget;
} drm_gamma_copy_t;
typedef struct drm_gamma_init {
enum {
GAMMA_INIT_DMA = 0x01,
GAMMA_CLEANUP_DMA = 0x02
} func;
int sarea_priv_offset;
int pcimode;
unsigned int mmio0;
unsigned int mmio1;
unsigned int mmio2;
unsigned int mmio3;
unsigned int buffers_offset;
int num_rast;
} drm_gamma_init_t;
#endif /* _GAMMA_DRM_H_ */
/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"
#include "drm_auth.h"
#include "drm_agpsupport.h"
#include "drm_bufs.h"
#include "gamma_context.h" /* NOTE! */
#include "drm_dma.h"
#include "gamma_old_dma.h" /* NOTE */
#include "drm_drawable.h"
#include "drm_drv.h"
#include "drm_fops.h"
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_irq.h"
#include "gamma_lists.h" /* NOTE */
#include "drm_lock.h"
#include "gamma_lock.h" /* NOTE */
#include "drm_memory.h"
#include "drm_proc.h"
#include "drm_vm.h"
#include "drm_stub.h"
#include "drm_scatter.h"
/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#ifndef _GAMMA_DRV_H_
#define _GAMMA_DRV_H_
typedef struct drm_gamma_private {
drm_gamma_sarea_t *sarea_priv;
drm_map_t *sarea;
drm_map_t *mmio0;
drm_map_t *mmio1;
drm_map_t *mmio2;
drm_map_t *mmio3;
int num_rast;
} drm_gamma_private_t;
/* gamma_dma.c */
extern int gamma_dma_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int gamma_dma_copy( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int gamma_do_cleanup_dma( drm_device_t *dev );
extern void gamma_dma_ready(drm_device_t *dev);
extern void gamma_dma_quiescent_single(drm_device_t *dev);
extern void gamma_dma_quiescent_dual(drm_device_t *dev);
/* gamma_dma.c */
extern int gamma_dma_schedule(drm_device_t *dev, int locked);
extern int gamma_dma(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int gamma_find_devices(void);
extern int gamma_found(void);
/* Gamma-specific code pulled from drm_fops.h:
*/
extern int DRM(finish)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(flush_unblock)(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags);
/* Gamma-specific code pulled from drm_dma.h:
*/
extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
*/
extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
/* externs for gamma changes to the ops */
extern struct file_operations DRM(fops);
extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
#define GLINT_DRI_BUF_COUNT 256
#define GAMMA_OFF(reg) \
((reg < 0x1000) \
? reg \
: ((reg < 0x10000) \
? (reg - 0x1000) \
: ((reg < 0x11000) \
? (reg - 0x10000) \
: (reg - 0x11000))))
#define GAMMA_BASE(reg) ((unsigned long) \
((reg < 0x1000) ? dev_priv->mmio0->handle : \
((reg < 0x10000) ? dev_priv->mmio1->handle : \
((reg < 0x11000) ? dev_priv->mmio2->handle : \
dev_priv->mmio3->handle))))
#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
#define GAMMA_READ(reg) GAMMA_DEREF(reg)
#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
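/* Usage note (editor's addition): GAMMA_READ()/GAMMA_WRITE() take the flat
 * register offsets #defined below and expect a drm_gamma_private_t named
 * dev_priv to be in scope, e.g.
 *
 *	drm_gamma_private_t *dev_priv = dev->dev_private;
 *
 *	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
 *		cpu_relax();
 *	GAMMA_WRITE(GAMMA_GDMACONTROL, 0);
 *
 * GAMMA_OFF()/GAMMA_BASE() split a flat offset across the four mmio
 * apertures: below 0x1000 -> mmio0, below 0x10000 -> mmio1, below 0x11000
 * -> mmio2, otherwise mmio3.
 */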
#define GAMMA_BROADCASTMASK 0x9378
#define GAMMA_COMMANDINTENABLE 0x0c48
#define GAMMA_DMAADDRESS 0x0028
#define GAMMA_DMACOUNT 0x0030
#define GAMMA_FILTERMODE 0x8c00
#define GAMMA_GCOMMANDINTFLAGS 0x0c50
#define GAMMA_GCOMMANDMODE 0x0c40
#define GAMMA_QUEUED_DMA_MODE	(1<<1)
#define GAMMA_GCOMMANDSTATUS 0x0c60
#define GAMMA_GDELAYTIMER 0x0c38
#define GAMMA_GDMACONTROL 0x0060
#define GAMMA_USE_AGP		(1<<1)
#define GAMMA_GINTENABLE 0x0808
#define GAMMA_GINTFLAGS 0x0810
#define GAMMA_INFIFOSPACE 0x0018
#define GAMMA_OUTFIFOWORDS 0x0020
#define GAMMA_OUTPUTFIFO 0x2000
#define GAMMA_SYNC 0x8c40
#define GAMMA_SYNC_TAG 0x0188
#define GAMMA_PAGETABLEADDR 0x0C00
#define GAMMA_PAGETABLELENGTH 0x0C08
#define GAMMA_PASSTHROUGH 0x1FE
#define GAMMA_DMAADDRTAG 0x530
#define GAMMA_DMACOUNTTAG 0x531
#define GAMMA_COMMANDINTTAG 0x532
#endif
/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
* Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include "drmP.h"
int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
{
	if (bl->count) return -EINVAL;
	bl->bufs = DRM(alloc)((count + 2) * sizeof(*bl->bufs),
			      DRM_MEM_BUFLISTS);
	if (!bl->bufs) return -ENOMEM;
	/* Size the allocation and the memset from 'count': the old code
	 * used bl->count, which the guard above guarantees is zero, and
	 * cleared only the first slot. */
	memset(bl->bufs, 0, (count + 2) * sizeof(*bl->bufs));
	bl->count = count;
bl->rp = bl->bufs;
bl->wp = bl->bufs;
bl->end = &bl->bufs[bl->count+1];
spin_lock_init(&bl->write_lock);
spin_lock_init(&bl->read_lock);
return 0;
}
int DRM(waitlist_destroy)(drm_waitlist_t *bl)
{
if (bl->rp != bl->wp) return -EINVAL;
if (bl->bufs) DRM(free)(bl->bufs,
(bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->count = 0;
bl->bufs = NULL;
bl->rp = NULL;
bl->wp = NULL;
bl->end = NULL;
return 0;
}
int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
{
int left;
unsigned long flags;
left = DRM_LEFTCOUNT(bl);
if (!left) {
DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
buf->idx, buf->filp);
return -EINVAL;
}
buf->list = DRM_LIST_WAIT;
spin_lock_irqsave(&bl->write_lock, flags);
*bl->wp = buf;
if (++bl->wp >= bl->end) bl->wp = bl->bufs;
spin_unlock_irqrestore(&bl->write_lock, flags);
return 0;
}
drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
{
drm_buf_t *buf;
unsigned long flags;
	spin_lock_irqsave(&bl->read_lock, flags);
	if (bl->rp == bl->wp) {
		spin_unlock_irqrestore(&bl->read_lock, flags);
		return NULL;
	}
	/* Read the slot only after the empty check; the old order fetched
	 * a stale entry first and then discarded it. */
	buf = *bl->rp;
	if (++bl->rp >= bl->end) bl->rp = bl->bufs;
	spin_unlock_irqrestore(&bl->read_lock, flags);
	return buf;
}
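/* Design note (editor's addition): the waitlist is a ring over count + 2
 * slots in which rp == wp means empty; one slot is left unused so that a
 * full ring (count buffers queued) remains distinguishable from an empty
 * one.  That is why waitlist_create() sizes the array at count + 2 and
 * points end at &bufs[count + 1], and why DRM_LEFTCOUNT()/DRM_BUFCOUNT()
 * can derive the fill level from rp and wp alone.
 */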
int DRM(freelist_create)(drm_freelist_t *bl, int count)
{
atomic_set(&bl->count, 0);
bl->next = NULL;
init_waitqueue_head(&bl->waiting);
bl->low_mark = 0;
bl->high_mark = 0;
atomic_set(&bl->wfh, 0);
spin_lock_init(&bl->lock);
++bl->initialized;
return 0;
}
int DRM(freelist_destroy)(drm_freelist_t *bl)
{
atomic_set(&bl->count, 0);
bl->next = NULL;
return 0;
}
int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
{
drm_device_dma_t *dma = dev->dma;
if (!dma) {
DRM_ERROR("No DMA support\n");
return 1;
}
if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
if (!bl) return 1;
buf->list = DRM_LIST_FREE;
spin_lock(&bl->lock);
buf->next = bl->next;
bl->next = buf;
spin_unlock(&bl->lock);
atomic_inc(&bl->count);
if (atomic_read(&bl->count) > dma->buf_count) {
DRM_ERROR("%d of %d buffers free after addition of %d\n",
atomic_read(&bl->count), dma->buf_count, buf->idx);
return 1;
}
/* Check for high water mark */
if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
atomic_set(&bl->wfh, 0);
wake_up_interruptible(&bl->waiting);
}
return 0;
}
static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
{
drm_buf_t *buf;
if (!bl) return NULL;
/* Get buffer */
spin_lock(&bl->lock);
if (!bl->next) {
spin_unlock(&bl->lock);
return NULL;
}
buf = bl->next;
bl->next = bl->next->next;
spin_unlock(&bl->lock);
atomic_dec(&bl->count);
buf->next = NULL;
buf->list = DRM_LIST_NONE;
if (buf->waiting || buf->pending) {
DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
return buf;
}
drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
{
drm_buf_t *buf = NULL;
DECLARE_WAITQUEUE(entry, current);
if (!bl || !bl->initialized) return NULL;
/* Check for low water mark */
if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
atomic_set(&bl->wfh, 1);
if (atomic_read(&bl->wfh)) {
if (block) {
add_wait_queue(&bl->waiting, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!atomic_read(&bl->wfh)
&& (buf = DRM(freelist_try)(bl))) break;
schedule();
if (signal_pending(current)) break;
}
current->state = TASK_RUNNING;
remove_wait_queue(&bl->waiting, &entry);
}
return buf;
}
return DRM(freelist_try)(bl);
}
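/* Watermark note (editor's addition): wfh ("wait for high") is set when
 * the free list drains to low_mark and cleared only once it refills to
 * high_mark, so blocking callers in freelist_get() sleep across the whole
 * hysteresis band instead of waking for every buffer freelist_put()
 * returns.
 */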
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
/* Gamma-specific code extracted from drm_lock.h:
*/
static int DRM(flush_queue)(drm_device_t *dev, int context)
{
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
atomic_inc(&q->block_write);
add_wait_queue(&q->flush_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!DRM_BUFCOUNT(&q->waitlist)) break;
schedule();
if (signal_pending(current)) {
ret = -EINTR; /* Can't restart */
break;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->flush_queue, &entry);
}
atomic_dec(&q->use_count);
/* NOTE: block_write is still incremented!
Use drm_flush_unlock_queue to decrement. */
return ret;
}
static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
{
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
if (atomic_read(&q->block_write)) {
atomic_dec(&q->block_write);
wake_up_interruptible(&q->write_queue);
}
}
atomic_dec(&q->use_count);
return 0;
}
int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_queue)(dev, i);
}
}
return ret;
}
int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_unblock_queue)(dev, i);
}
}
return ret;
}
int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int ret = 0;
drm_lock_t lock;
DRM_DEBUG("\n");
if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
return -EFAULT;
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
return ret;
}
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
/* Gamma-specific code pulled from drm_dma.h:
*/
void DRM(clear_next_buffer)(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
dma->next_buffer = NULL;
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
wake_up_interruptible(&dma->next_queue->flush_queue);
}
dma->next_queue = NULL;
}
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
int i;
int candidate = -1;
int j = jiffies;
if (!dev) {
DRM_ERROR("No device\n");
return -1;
}
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
/* This only happens between the time the
interrupt is initialized and the time
the queues are initialized. */
return -1;
}
/* Doing "while locked" DMA? */
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
return DRM_KERNEL_CONTEXT;
}
/* If there are buffers on the last_context
queue, and we have not been executing
this context very long, continue to
execute this context. */
if (dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j
&& DRM_WAITCOUNT(dev, dev->last_context)) {
return dev->last_context;
}
/* Otherwise, find a candidate */
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
if (candidate < 0) {
for (i = 0; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
}
if (wrapper
&& candidate >= 0
&& candidate != dev->last_context
&& dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j) {
if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
del_timer(&dev->timer);
dev->timer.function = wrapper;
dev->timer.data = (unsigned long)dev;
dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
add_timer(&dev->timer);
}
return -1;
}
return candidate;
}
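/* Scheduling note (editor's addition): select_queue() prefers, in order,
 * "while locked" work queued on DRM_KERNEL_CONTEXT, then the current
 * context while it is still inside its DRM_TIME_SLICE, then a round-robin
 * scan starting after last_checked.  Returning -1 with the timer armed
 * defers the switch until the slice expires, at which point the wrapper
 * (gamma_dma_timer_bh) reschedules DMA.
 */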
int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i;
drm_queue_t *q;
drm_buf_t *buf;
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
int *ind;
int err;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
int context = dev->lock.hw_lock->lock;
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return -EINVAL;
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
DRM_ERROR("Lock held by %d while %d makes"
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return -EINVAL;
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;
} else {
q = dev->queuelist[d->context];
}
atomic_inc(&q->use_count);
if (atomic_read(&q->block_write)) {
add_wait_queue(&q->write_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!atomic_read(&q->block_write)) break;
schedule();
if (signal_pending(current)) {
atomic_dec(&q->use_count);
remove_wait_queue(&q->write_queue, &entry);
return -EINTR;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->write_queue, &entry);
}
	ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
	if (!ind) {
		/* Drop the queue reference taken above before bailing. */
		atomic_dec(&q->use_count);
		return -ENOMEM;
	}
if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
err = -EFAULT;
goto out;
}
err = -EINVAL;
for (i = 0; i < d->send_count; i++) {
idx = ind[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
ind[i], dma->buf_count - 1);
goto out;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
goto out;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
goto out;
}
		buf->used = ind[i];	/* XXX: stores the submitted index as the byte count; send_sizes is never consulted here */
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
ind[i], i);
goto out;
}
if (buf->waiting) {
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
ind[i], i);
goto out;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
|| atomic_read(&q->finalization)) {
DRM(free_buffer)(dev, buf);
} else {
DRM(waitlist_put)(&q->waitlist, buf);
atomic_inc(&q->total_queued);
}
}
	atomic_dec(&q->use_count);
	/* The success path must release the index array too, not just 'out:'. */
	DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
	return 0;
out:
DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
atomic_dec(&q->use_count);
return err;
}
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
int order)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i;
drm_buf_t *buf;
drm_device_dma_t *dma = dev->dma;
for (i = d->granted_count; i < d->request_count; i++) {
buf = DRM(freelist_get)(&dma->bufs[order].freelist,
d->flags & _DRM_DMA_WAIT);
if (!buf) break;
if (buf->pending || buf->waiting) {
DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
buf->idx,
buf->filp,
buf->waiting,
buf->pending);
}
buf->filp = filp;
if (copy_to_user(&d->request_indices[i],
&buf->idx,
sizeof(buf->idx)))
return -EFAULT;
if (copy_to_user(&d->request_sizes[i],
&buf->total,
sizeof(buf->total)))
return -EFAULT;
++d->granted_count;
}
return 0;
}
int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
{
int order;
int retcode = 0;
int tmp_order;
order = DRM(order)(dma->request_size);
dma->granted_count = 0;
retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
for (tmp_order = order - 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order >= DRM_MIN_ORDER;
--tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(filp, dma,
tmp_order);
}
}
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
for (tmp_order = order + 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order <= DRM_MAX_ORDER;
++tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(filp, dma,
tmp_order);
}
}
	return retcode;	/* propagate copy_to_user failures from the helper */
}
...@@ -45,11 +45,6 @@ ...@@ -45,11 +45,6 @@
#define I810_BUF_UNMAPPED 0 #define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1 #define I810_BUF_MAPPED 1
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
#define down_write down
#define up_write up
#endif
static drm_buf_t *i810_freelist_get(drm_device_t *dev) static drm_buf_t *i810_freelist_get(drm_device_t *dev)
{ {
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
...@@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev, ...@@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev,
DRM_ERROR("can not find mmio map!\n"); DRM_ERROR("can not find mmio map!\n");
return -EINVAL; return -EINVAL;
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) { if (!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv; dev->dev_private = (void *)dev_priv;
...@@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = { ...@@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = {
}; };
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
 * A value of 1 is always returned to indicate that every i810 is AGP.
*/
int i810_driver_device_is_agp(drm_device_t * dev)
{
return 1;
}
...@@ -84,6 +84,7 @@ static struct drm_driver driver = { ...@@ -84,6 +84,7 @@ static struct drm_driver driver = {
.dev_priv_size = sizeof(drm_i810_buf_priv_t), .dev_priv_size = sizeof(drm_i810_buf_priv_t),
.pretakedown = i810_driver_pretakedown, .pretakedown = i810_driver_pretakedown,
.prerelease = i810_driver_prerelease, .prerelease = i810_driver_prerelease,
.device_is_agp = i810_driver_device_is_agp,
.release = i810_driver_release, .release = i810_driver_release,
.dma_quiescent = i810_driver_dma_quiescent, .dma_quiescent = i810_driver_dma_quiescent,
.reclaim_buffers = i810_reclaim_buffers, .reclaim_buffers = i810_reclaim_buffers,
......
...@@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev); ...@@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev);
extern void i810_driver_release(drm_device_t *dev, struct file *filp); extern void i810_driver_release(drm_device_t *dev, struct file *filp);
extern void i810_driver_pretakedown(drm_device_t *dev); extern void i810_driver_pretakedown(drm_device_t *dev);
extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp); extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern int i810_driver_device_is_agp(drm_device_t * dev);
#define I810_BASE(reg) ((unsigned long) \ #define I810_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle) dev_priv->mmio_map->handle)
......
...@@ -47,11 +47,6 @@ ...@@ -47,11 +47,6 @@
#define I830_BUF_UNMAPPED 0 #define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED 1 #define I830_BUF_MAPPED 1
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
#define down_write down
#define up_write up
#endif
static drm_buf_t *i830_freelist_get(drm_device_t *dev) static drm_buf_t *i830_freelist_get(drm_device_t *dev)
{ {
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
...@@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev, ...@@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev,
DRM_ERROR("can not find mmio map!\n"); DRM_ERROR("can not find mmio map!\n");
return -EINVAL; return -EINVAL;
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if(!dev->agp_buffer_map) { if(!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv; dev->dev_private = (void *)dev_priv;
...@@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = { ...@@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = {
}; };
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
 * A value of 1 is always returned to indicate that every i8xx is AGP.
*/
int i830_driver_device_is_agp(drm_device_t * dev)
{
return 1;
}
...@@ -88,6 +88,7 @@ static struct drm_driver driver = { ...@@ -88,6 +88,7 @@ static struct drm_driver driver = {
.dev_priv_size = sizeof(drm_i830_buf_priv_t), .dev_priv_size = sizeof(drm_i830_buf_priv_t),
.pretakedown = i830_driver_pretakedown, .pretakedown = i830_driver_pretakedown,
.prerelease = i830_driver_prerelease, .prerelease = i830_driver_prerelease,
.device_is_agp = i830_driver_device_is_agp,
.release = i830_driver_release, .release = i830_driver_release,
.dma_quiescent = i830_driver_dma_quiescent, .dma_quiescent = i830_driver_dma_quiescent,
.reclaim_buffers = i830_reclaim_buffers, .reclaim_buffers = i830_reclaim_buffers,
......
...@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev); ...@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev);
extern void i830_driver_release(drm_device_t *dev, struct file *filp); extern void i830_driver_release(drm_device_t *dev, struct file *filp);
extern int i830_driver_dma_quiescent(drm_device_t *dev); extern int i830_driver_dma_quiescent(drm_device_t *dev);
extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp); extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern int i830_driver_device_is_agp(drm_device_t * dev);
#define I830_BASE(reg) ((unsigned long) \ #define I830_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle) dev_priv->mmio_map->handle)
......
...@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev) ...@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev)
drm_core_ioremapfree( &dev_priv->ring.map, dev); drm_core_ioremapfree( &dev_priv->ring.map, dev);
} }
if (dev_priv->hw_status_page) { if (dev_priv->status_page_dmah) {
drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page, drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->dma_status_page);
/* Need to rewrite hardware status page */ /* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000); I915_WRITE(0x02080, 0x1ffff000);
} }
...@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev, ...@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev,
dev_priv->allow_batchbuffer = 1; dev_priv->allow_batchbuffer = 1;
/* Program Hardware Status Page */ /* Program Hardware Status Page */
dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
0xffffffff, 0xffffffff);
&dev_priv->dma_status_page);
if (!dev_priv->hw_status_page) { if (!dev_priv->status_page_dmah) {
dev->dev_private = (void *)dev_priv; dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev); i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n"); DRM_ERROR("Can not allocate hardware status page\n");
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE); memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
...@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = { ...@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = {
}; };
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
 * A value of 1 is always returned to indicate that every i9x5 is AGP.
*/
int i915_driver_device_is_agp(drm_device_t * dev)
{
return 1;
}
...@@ -79,6 +79,7 @@ static struct drm_driver driver = { ...@@ -79,6 +79,7 @@ static struct drm_driver driver = {
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.pretakedown = i915_driver_pretakedown, .pretakedown = i915_driver_pretakedown,
.prerelease = i915_driver_prerelease, .prerelease = i915_driver_prerelease,
.device_is_agp = i915_driver_device_is_agp,
.irq_preinstall = i915_driver_irq_preinstall, .irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall, .irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall, .irq_uninstall = i915_driver_irq_uninstall,
......
...@@ -79,9 +79,10 @@ typedef struct drm_i915_private { ...@@ -79,9 +79,10 @@ typedef struct drm_i915_private {
drm_i915_sarea_t *sarea_priv; drm_i915_sarea_t *sarea_priv;
drm_i915_ring_buffer_t ring; drm_i915_ring_buffer_t ring;
drm_dma_handle_t *status_page_dmah;
void *hw_status_page; void *hw_status_page;
unsigned long counter;
dma_addr_t dma_status_page; dma_addr_t dma_status_page;
unsigned long counter;
int back_offset; int back_offset;
int front_offset; int front_offset;
...@@ -102,6 +103,7 @@ typedef struct drm_i915_private { ...@@ -102,6 +103,7 @@ typedef struct drm_i915_private {
extern void i915_kernel_lost_context(drm_device_t * dev); extern void i915_kernel_lost_context(drm_device_t * dev);
extern void i915_driver_pretakedown(drm_device_t *dev); extern void i915_driver_pretakedown(drm_device_t *dev);
extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp); extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern int i915_driver_device_is_agp(drm_device_t *dev);
/* i915_irq.c */ /* i915_irq.c */
extern int i915_irq_emit(DRM_IOCTL_ARGS); extern int i915_irq_emit(DRM_IOCTL_ARGS);
......
...@@ -23,18 +23,21 @@ ...@@ -23,18 +23,21 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
*/
/**
* \file mga_dma.c
* DMA support for MGA G200 / G400.
* *
* Authors: * \author Rickard E. (Rik) Faith <faith@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com> * \author Jeff Hartmann <jhartmann@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com> * \author Keith Whitwell <keith@tungstengraphics.com>
* Keith Whitwell <keith@tungstengraphics.com> * \author Gareth Hughes <gareth@valinux.com>
*
* Rewritten by:
* Gareth Hughes <gareth@valinux.com>
*/ */
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"
#include "drm_sarea.h"
#include "mga_drm.h" #include "mga_drm.h"
#include "mga_drv.h" #include "mga_drv.h"
...@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv ) ...@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
DRM_DEBUG( " space = 0x%06x\n", primary->space ); DRM_DEBUG( " space = 0x%06x\n", primary->space );
mga_flush_write_combine(); mga_flush_write_combine();
MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
DRM_DEBUG( "done.\n" ); DRM_DEBUG( "done.\n" );
} }
...@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ) ...@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
DRM_DEBUG( " space = 0x%06x\n", primary->space ); DRM_DEBUG( " space = 0x%06x\n", primary->space );
mga_flush_write_combine(); mga_flush_write_combine();
MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
set_bit( 0, &primary->wrapped ); set_bit( 0, &primary->wrapped );
DRM_DEBUG( "done.\n" ); DRM_DEBUG( "done.\n" );
...@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ) ...@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
* DMA initialization, cleanup * DMA initialization, cleanup
*/ */
int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
{
drm_mga_private_t * dev_priv;
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
return DRM_ERR(ENOMEM);
dev->dev_private = (void *)dev_priv;
memset(dev_priv, 0, sizeof(drm_mga_private_t));
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
return 0;
}
/**
* Bootstrap the driver for AGP DMA.
*
* \todo
 * Investigate whether there is any benefit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
* memory.
*
* \todo
* This routine needs to set dma_bs->agp_mode to the mode actually configured
* in the hardware. Looking just at the Linux AGP driver code, I don't see
* an easy way to determine this.
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
int err;
unsigned offset;
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
drm_buf_desc_t req;
drm_agp_mode_t mode;
drm_agp_info_t info;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
if (err) {
DRM_ERROR("Unable to acquire AGP\n");
return err;
}
err = drm_agp_info(dev, &info);
if (err) {
DRM_ERROR("Unable to get AGP info\n");
return err;
}
mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
err = drm_agp_enable(dev, mode);
if (err) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
return err;
}
/* In addition to the usual AGP mode configuration, the G200 AGP cards
* need to have the AGP mode "manually" set.
*/
if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
if (mode.mode & 0x02) {
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
}
else {
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
}
}
/* Allocate and bind AGP memory. */
dev_priv->agp_pages = agp_size / PAGE_SIZE;
dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
if (dev_priv->agp_mem == NULL) {
dev_priv->agp_pages = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
dma_bs->agp_size);
return DRM_ERR(ENOMEM);
}
err = drm_bind_agp( dev_priv->agp_mem, 0 );
if (err) {
DRM_ERROR("Unable to bind AGP memory\n");
return err;
}
offset = 0;
err = drm_addmap( dev, offset, warp_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
if (err) {
DRM_ERROR("Unable to map WARP microcode\n");
return err;
}
offset += warp_size;
err = drm_addmap( dev, offset, dma_bs->primary_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
if (err) {
DRM_ERROR("Unable to map primary DMA region\n");
return err;
}
offset += dma_bs->primary_size;
err = drm_addmap( dev, offset, secondary_size,
_DRM_AGP, 0, & dev->agp_buffer_map );
if (err) {
DRM_ERROR("Unable to map secondary DMA region\n");
return err;
}
(void) memset( &req, 0, sizeof(req) );
req.count = dma_bs->secondary_bin_count;
req.size = dma_bs->secondary_bin_size;
req.flags = _DRM_AGP_BUFFER;
req.agp_start = offset;
err = drm_addbufs_agp( dev, & req );
if (err) {
DRM_ERROR("Unable to add secondary DMA buffers\n");
return err;
}
offset += secondary_size;
err = drm_addmap( dev, offset, agp_size - offset,
_DRM_AGP, 0, & dev_priv->agp_textures );
if (err) {
DRM_ERROR("Unable to map AGP texture region\n");
return err;
}
drm_core_ioremap(dev_priv->warp, dev);
drm_core_ioremap(dev_priv->primary, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev_priv->warp->handle ||
!dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
dev_priv->warp->handle, dev_priv->primary->handle,
dev->agp_buffer_map->handle);
return DRM_ERR(ENOMEM);
}
dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE;
DRM_INFO("Initialized card for AGP DMA.\n");
return 0;
}
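The single AGP allocation above is carved up in address order; schematically (offsets as computed in the code, sizes being the caller's requested values):

	0 ........ warp_size ....... +primary_size ........ +secondary_size ...... agp_size
	[ WARP ucode ][ primary DMA ][ secondary DMA buffers ][ AGP texture region ]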
/**
* Bootstrap the driver for PCI DMA.
*
* \todo
* The algorithm for decreasing the size of the primary DMA buffer could be
 * better.  The size should be rounded up to the nearest page size, then
 * the request size should be decreased by a single page on each pass
 * through the loop.
*
* \todo
* Determine whether the maximum address passed to drm_pci_alloc is correct.
* The same goes for drm_addbufs_pci.
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
unsigned int primary_size;
unsigned int bin_count;
int err;
drm_buf_desc_t req;
if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
return DRM_ERR(EFAULT);
}
/* The proper alignment is 0x100 for this mapping */
err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->warp);
if (err != 0) {
DRM_ERROR("Unable to create mapping for WARP microcode\n");
return err;
}
/* Other than the bottom two bits being used to encode other
* information, there don't appear to be any restrictions on the
* alignment of the primary or secondary DMA buffers.
*/
for ( primary_size = dma_bs->primary_size
; primary_size != 0
; primary_size >>= 1 ) {
/* The proper alignment for this mapping is 0x04 */
err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->primary);
if (!err)
break;
}
if (err != 0) {
DRM_ERROR("Unable to allocate primary DMA region\n");
return DRM_ERR(ENOMEM);
}
if (dev_priv->primary->size != dma_bs->primary_size) {
DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
dma_bs->primary_size,
(unsigned) dev_priv->primary->size);
dma_bs->primary_size = dev_priv->primary->size;
}
for ( bin_count = dma_bs->secondary_bin_count
; bin_count > 0
; bin_count-- ) {
(void) memset( &req, 0, sizeof(req) );
req.count = bin_count;
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci( dev, & req );
if (!err) {
break;
}
}
if (bin_count == 0) {
DRM_ERROR("Unable to add secondary DMA buffers\n");
return err;
}
if (bin_count != dma_bs->secondary_bin_count) {
DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
"to %u.\n", dma_bs->secondary_bin_count, bin_count);
dma_bs->secondary_bin_count = bin_count;
}
dev_priv->dma_access = 0;
dev_priv->wagp_enable = 0;
dma_bs->agp_mode = 0;
DRM_INFO("Initialized card for PCI DMA.\n");
return 0;
}
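As the \todo comment above the function notes, the halving loop is a coarse fallback. A page-granular alternative might look like the following sketch (an illustration of the suggested algorithm, not the committed code; PAGE_ALIGN and PAGE_SIZE are the usual kernel macros):

	for (primary_size = PAGE_ALIGN(dma_bs->primary_size);
	     primary_size != 0;
	     primary_size -= PAGE_SIZE) {
		/* The proper alignment for this mapping is 0x04. */
		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, &dev_priv->primary);
		if (!err)
			break;
	}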
static int mga_do_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err = 0;	/* initialized: tested below even when the AGP path is skipped */
drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;
dev_priv->used_new_dma_init = 1;
/* The first steps are the same for both PCI and AGP based DMA. Map
 * the card's MMIO registers and map a status page.
*/
err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
if (err) {
DRM_ERROR("Unable to map MMIO region\n");
return err;
}
err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
& dev_priv->status );
if (err) {
DRM_ERROR("Unable to map status region\n");
return err;
}
/* The DMA initialization procedure is slightly different for PCI and
* AGP cards. AGP cards just allocate a large block of AGP memory and
* carve off portions of it for internal uses. The remaining memory
* is returned to user-mode to be used for AGP textures.
*/
if (is_agp) {
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
}
/* If we attempted to initialize the card for AGP DMA but failed,
 * clean up any mess that may have been created.
*/
if (err) {
mga_do_cleanup_dma(dev);
}
/* Not only do we want to try to initialize PCI cards for PCI DMA,
 * but we also try to initialize AGP cards that could not be
* initialized for AGP DMA. This covers the case where we have an AGP
* card in a system with an unsupported AGP chipset. In that case the
* card will be detected as AGP, but we won't be able to allocate any
* AGP memory, etc.
*/
if (!is_agp || err) {
err = mga_do_pci_dma_bootstrap(dev, dma_bs);
}
return err;
}
int mga_dma_bootstrap(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_dma_bootstrap_t bootstrap;
int err;
DRM_COPY_FROM_USER_IOCTL(bootstrap,
(drm_mga_dma_bootstrap_t __user *) data,
sizeof(bootstrap));
err = mga_do_dma_bootstrap(dev, & bootstrap);
if (! err) {
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;
if (dev_priv->agp_textures != NULL) {
bootstrap.texture_handle = dev_priv->agp_textures->offset;
bootstrap.texture_size = dev_priv->agp_textures->size;
}
else {
bootstrap.texture_handle = 0;
bootstrap.texture_size = 0;
}
bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
sizeof(bootstrap))) {
err = DRM_ERR(EFAULT);
}
}
else {
mga_do_cleanup_dma(dev);
}
return err;
}
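For reference, a minimal user-space sketch of the new bootstrap path (hypothetical values; assumes fd is an open, master DRM file descriptor, with headers and error handling elided):

	drm_mga_dma_bootstrap_t bs;

	memset(&bs, 0, sizeof(bs));
	bs.primary_size = 1024 * 1024;		/* 1 MB primary DMA buffer */
	bs.secondary_bin_count = 128;		/* 128 secondary buffers... */
	bs.secondary_bin_size = 64 * 1024;	/* ...of 64 kB each */
	bs.agp_mode = 4;			/* prefer AGP 4x; 0 would force PCI DMA */
	bs.agp_size = 64;			/* request a 64 MB AGP aperture */

	if (ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, &bs) == 0) {
		/* The kernel has written back what it actually used:
		 * bs.agp_mode is 0 if it fell back to PCI DMA, and
		 * bs.texture_handle / bs.texture_size describe any
		 * AGP texture region returned to user space. */
	}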
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
{ {
drm_mga_private_t *dev_priv; drm_mga_private_t *dev_priv;
int ret; int ret;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
if ( !dev_priv )
return DRM_ERR(ENOMEM);
memset( dev_priv, 0, sizeof(drm_mga_private_t) );
dev_priv->chipset = init->chipset;
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; dev_priv = dev->dev_private;
if ( init->sgram ) { if (init->sgram) {
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
} else { } else {
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
...@@ -436,88 +799,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) ...@@ -436,88 +799,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
DRM_GETSAREA(); DRM_GETSAREA();
if(!dev_priv->sarea) { if (!dev_priv->sarea) {
DRM_ERROR( "failed to find sarea!\n" ); DRM_ERROR("failed to find sarea!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); if (! dev_priv->used_new_dma_init) {
if(!dev_priv->mmio) { dev_priv->status = drm_core_findmap(dev, init->status_offset);
DRM_ERROR( "failed to find mmio region!\n" ); if (!dev_priv->status) {
/* Assign dev_private so we can do cleanup. */ DRM_ERROR("failed to find status page!\n");
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev_priv->status = drm_core_findmap(dev, init->status_offset); dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if(!dev_priv->status) { if (!dev_priv->mmio) {
DRM_ERROR( "failed to find status page!\n" ); DRM_ERROR("failed to find mmio region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev_priv->warp = drm_core_findmap(dev, init->warp_offset); dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
if(!dev_priv->warp) { if (!dev_priv->warp) {
DRM_ERROR( "failed to find warp microcode region!\n" ); DRM_ERROR("failed to find warp microcode region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev_priv->primary = drm_core_findmap(dev, init->primary_offset); dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
if(!dev_priv->primary) { if (!dev_priv->primary) {
DRM_ERROR( "failed to find primary dma region!\n" ); DRM_ERROR("failed to find primary dma region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if(!dev->agp_buffer_map) { if (!dev->agp_buffer_map) {
DRM_ERROR( "failed to find dma buffer region!\n" ); DRM_ERROR("failed to find dma buffer region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
drm_core_ioremap(dev_priv->warp, dev);
drm_core_ioremap(dev_priv->primary, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
}
dev_priv->sarea_priv = dev_priv->sarea_priv =
(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset); init->sarea_priv_offset);
drm_core_ioremap( dev_priv->warp, dev ); if (!dev_priv->warp->handle ||
drm_core_ioremap( dev_priv->primary, dev );
drm_core_ioremap( dev->agp_buffer_map, dev );
if(!dev_priv->warp->handle ||
!dev_priv->primary->handle || !dev_priv->primary->handle ||
!dev->agp_buffer_map->handle ) { ((dev_priv->dma_access != 0) &&
DRM_ERROR( "failed to ioremap agp regions!\n" ); ((dev->agp_buffer_map == NULL) ||
/* Assign dev_private so we can do cleanup. */ (dev->agp_buffer_map->handle == NULL)))) {
dev->dev_private = (void *)dev_priv; DRM_ERROR("failed to ioremap agp regions!\n");
mga_do_cleanup_dma( dev );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
ret = mga_warp_install_microcode( dev_priv ); ret = mga_warp_install_microcode(dev_priv);
if ( ret < 0 ) { if (ret < 0) {
DRM_ERROR( "failed to install WARP ucode!\n" ); DRM_ERROR("failed to install WARP ucode!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return ret; return ret;
} }
ret = mga_warp_init( dev_priv ); ret = mga_warp_init(dev_priv);
if ( ret < 0 ) { if (ret < 0) {
DRM_ERROR( "failed to init WARP engine!\n" ); DRM_ERROR("failed to init WARP engine!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return ret; return ret;
} }
...@@ -557,22 +898,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) ...@@ -557,22 +898,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
dev_priv->sarea_priv->last_frame.head = 0; dev_priv->sarea_priv->last_frame.head = 0;
dev_priv->sarea_priv->last_frame.wrap = 0; dev_priv->sarea_priv->last_frame.wrap = 0;
if ( mga_freelist_init( dev, dev_priv ) < 0 ) { if (mga_freelist_init(dev, dev_priv) < 0) {
DRM_ERROR( "could not initialize freelist\n" ); DRM_ERROR("could not initialize freelist\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
/* Make dev_private visable to others. */
dev->dev_private = (void *)dev_priv;
return 0; return 0;
} }
static int mga_do_cleanup_dma( drm_device_t *dev ) static int mga_do_cleanup_dma( drm_device_t *dev )
{ {
DRM_DEBUG( "\n" ); int err = 0;
DRM_DEBUG("\n");
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
...@@ -583,20 +920,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev ) ...@@ -583,20 +920,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev )
if ( dev->dev_private ) { if ( dev->dev_private ) {
drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_private_t *dev_priv = dev->dev_private;
if ( dev_priv->warp != NULL ) if ((dev_priv->warp != NULL)
drm_core_ioremapfree( dev_priv->warp, dev ); && (dev_priv->mmio->type != _DRM_CONSISTENT))
if ( dev_priv->primary != NULL ) drm_core_ioremapfree(dev_priv->warp, dev);
drm_core_ioremapfree( dev_priv->primary, dev );
if ( dev->agp_buffer_map != NULL ) if ((dev_priv->primary != NULL)
drm_core_ioremapfree( dev->agp_buffer_map, dev ); && (dev_priv->primary->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->primary, dev);
if ( dev_priv->head != NULL ) { if (dev->agp_buffer_map != NULL)
mga_freelist_cleanup( dev ); drm_core_ioremapfree(dev->agp_buffer_map, dev);
if (dev_priv->used_new_dma_init) {
if (dev_priv->agp_mem != NULL) {
dev_priv->agp_textures = NULL;
drm_unbind_agp(dev_priv->agp_mem);
drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
dev_priv->agp_pages = 0;
dev_priv->agp_mem = NULL;
} }
drm_free( dev->dev_private, sizeof(drm_mga_private_t), if ((dev->agp != NULL) && dev->agp->acquired) {
DRM_MEM_DRIVER ); err = drm_agp_release(dev);
dev->dev_private = NULL; }
dev_priv->used_new_dma_init = 0;
}
dev_priv->warp = NULL;
dev_priv->primary = NULL;
dev_priv->mmio = NULL;
dev_priv->status = NULL;
dev_priv->sarea = NULL;
dev_priv->sarea_priv = NULL;
dev->agp_buffer_map = NULL;
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
dev_priv->warp_pipe = 0;
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
if (dev_priv->head != NULL) {
mga_freelist_cleanup(dev);
}
} }
return 0; return 0;
...@@ -606,14 +972,20 @@ int mga_dma_init( DRM_IOCTL_ARGS ) ...@@ -606,14 +972,20 @@ int mga_dma_init( DRM_IOCTL_ARGS )
{ {
DRM_DEVICE; DRM_DEVICE;
drm_mga_init_t init; drm_mga_init_t init;
int err;
LOCK_TEST_WITH_RETURN( dev, filp ); LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) ); DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
sizeof(init));
switch ( init.func ) { switch ( init.func ) {
case MGA_INIT_DMA: case MGA_INIT_DMA:
return mga_do_init_dma( dev, &init ); err = mga_do_init_dma(dev, &init);
if (err) {
(void) mga_do_cleanup_dma(dev);
}
return err;
case MGA_CLEANUP_DMA: case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma( dev ); return mga_do_cleanup_dma( dev );
} }
...@@ -742,7 +1114,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS ) ...@@ -742,7 +1114,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
return ret; return ret;
} }
void mga_driver_pretakedown(drm_device_t *dev) /**
* Called just before the module is unloaded.
*/
int mga_driver_postcleanup(drm_device_t * dev)
{
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
return 0;
}
/**
* Called when the last opener of the device is closed.
*/
void mga_driver_pretakedown(drm_device_t * dev)
{ {
mga_do_cleanup_dma( dev ); mga_do_cleanup_dma( dev );
} }
......
...@@ -73,7 +73,8 @@ ...@@ -73,7 +73,8 @@
#define MGA_CARD_TYPE_G200 1 #define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2 #define MGA_CARD_TYPE_G400 2
#define MGA_CARD_TYPE_G450 3 /* not currently used */
#define MGA_CARD_TYPE_G550 4
#define MGA_FRONT 0x1 #define MGA_FRONT 0x1
#define MGA_BACK 0x2 #define MGA_BACK 0x2
...@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea { ...@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea {
} drm_mga_sarea_t; } drm_mga_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
/* MGA specific ioctls /* MGA specific ioctls
* The device specific ioctl range is 0x40 to 0x79. * The device specific ioctl range is 0x40 to 0x79.
*/ */
...@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea { ...@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea {
#define DRM_MGA_BLIT 0x08 #define DRM_MGA_BLIT 0x08
#define DRM_MGA_GETPARAM 0x09 #define DRM_MGA_GETPARAM 0x09
/* 3.2:
* ioctls for operating on fences.
*/
#define DRM_MGA_SET_FENCE 0x0a
#define DRM_MGA_WAIT_FENCE 0x0b
#define DRM_MGA_DMA_BOOTSTRAP 0x0c
#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
...@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea { ...@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea {
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) #define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) #define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) #define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
typedef struct _drm_mga_warp_index { typedef struct _drm_mga_warp_index {
int installed; int installed;
...@@ -291,12 +299,72 @@ typedef struct drm_mga_init { ...@@ -291,12 +299,72 @@ typedef struct drm_mga_init {
unsigned long buffers_offset; unsigned long buffers_offset;
} drm_mga_init_t; } drm_mga_init_t;
typedef struct drm_mga_fullscreen { typedef struct drm_mga_dma_bootstrap {
enum { /**
MGA_INIT_FULLSCREEN = 0x01, * \name AGP texture region
MGA_CLEANUP_FULLSCREEN = 0x02 *
} func; * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
} drm_mga_fullscreen_t; * be filled in with the actual AGP texture settings.
*
* \warning
 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
* is zero, it means that PCI memory (most likely through the use of
* an IOMMU) is being used for "AGP" textures.
*/
/*@{*/
unsigned long texture_handle; /**< Handle used to map AGP textures. */
uint32_t texture_size; /**< Size of the AGP texture region. */
/*@}*/
/**
* Requested size of the primary DMA region.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
 * filled in with the actual size of the primary DMA region that was
 * allocated.
 */
uint32_t primary_size;
/**
* Requested number of secondary DMA buffers.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual number of secondary DMA buffers
* allocated. Particularly when PCI DMA is used, this may be
 * (substantially) less than the number requested.
*/
uint32_t secondary_bin_count;
/**
* Requested size of each secondary DMA buffer.
*
* While the kernel \b is free to reduce
 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
*/
uint32_t secondary_bin_size;
/**
* Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
* \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
* zero, it means that PCI DMA should be used, even if AGP is
* possible.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
* (i.e., PCI DMA was used), this value will be zero.
*/
uint32_t agp_mode;
/**
* Desired AGP GART size, measured in megabytes.
*/
uint8_t agp_size;
} drm_mga_dma_bootstrap_t;
typedef struct drm_mga_clear { typedef struct drm_mga_clear {
unsigned int flags; unsigned int flags;
...@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit { ...@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit {
*/ */
#define MGA_PARAM_IRQ_NR 1 #define MGA_PARAM_IRQ_NR 1
/* 3.2: Query the actual card type. The DDX only distinguishes between
* G200 chips and non-G200 chips, which it calls G400. It turns out that
 * there are some very subtle differences between the G4x0 chips and the G550
* chips. Using this parameter query, a client-side driver can detect the
* difference between a G4x0 and a G550.
*/
#define MGA_PARAM_CARD_TYPE 2
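A client-side query might look like this sketch (assumes an open DRM fd; uses the drm_mga_getparam_t declared just below, error handling elided):

	int card_type = 0;
	drm_mga_getparam_t gp;

	gp.param = MGA_PARAM_CARD_TYPE;
	gp.value = &card_type;
	if (ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) == 0) {
		/* card_type holds one of the MGA_CARD_TYPE_* values,
		 * letting the client tell a G4x0 from a G550. */
	}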
typedef struct drm_mga_getparam { typedef struct drm_mga_getparam {
int param; int param;
void __user *value; void __user *value;
......
...@@ -38,8 +38,15 @@ ...@@ -38,8 +38,15 @@
#include "drm_pciids.h" #include "drm_pciids.h"
static int mga_driver_device_is_agp(drm_device_t * dev);
static int postinit( struct drm_device *dev, unsigned long flags ) static int postinit( struct drm_device *dev, unsigned long flags )
{ {
drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
dev->counters += 3; dev->counters += 3;
dev->types[6] = _DRM_STAT_IRQ; dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY; dev->types[7] = _DRM_STAT_PRIMARY;
...@@ -79,8 +86,11 @@ extern int mga_max_ioctl; ...@@ -79,8 +86,11 @@ extern int mga_max_ioctl;
static struct drm_driver driver = { static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.preinit = mga_driver_preinit,
.postcleanup = mga_driver_postcleanup,
.pretakedown = mga_driver_pretakedown, .pretakedown = mga_driver_pretakedown,
.dma_quiescent = mga_driver_dma_quiescent, .dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
.vblank_wait = mga_driver_vblank_wait, .vblank_wait = mga_driver_vblank_wait,
.irq_preinstall = mga_driver_irq_preinstall, .irq_preinstall = mga_driver_irq_preinstall,
.irq_postinstall = mga_driver_irq_postinstall, .irq_postinstall = mga_driver_irq_postinstall,
...@@ -128,3 +138,38 @@ module_exit(mga_exit); ...@@ -128,3 +138,38 @@ module_exit(mga_exit);
MODULE_AUTHOR( DRIVER_AUTHOR ); MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL and additional rights"); MODULE_LICENSE("GPL and additional rights");
/**
* Determine if the device really is AGP or not.
*
* In addition to the usual tests performed by \c drm_device_is_agp, this
* function detects PCI G450 cards that appear to the system exactly like
* AGP G450 cards.
*
* \param dev The device to be tested.
*
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
int mga_driver_device_is_agp(drm_device_t * dev)
{
const struct pci_dev * const pdev = dev->pdev;
/* There are PCI versions of the G450. These cards have the
* same PCI ID as the AGP G450, but have an additional PCI-to-PCI
* bridge chip. We detect these cards, which are not currently
* supported by this driver, by looking at the device ID of the
* bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
* device.
*/
if ( (pdev->device == 0x0525)
&& (pdev->bus->self->vendor == 0x3388)
&& (pdev->bus->self->device == 0x0021) ) {
return 0;
}
return 2;
}
...@@ -38,10 +38,10 @@ ...@@ -38,10 +38,10 @@
#define DRIVER_NAME "mga" #define DRIVER_NAME "mga"
#define DRIVER_DESC "Matrox G200/G400" #define DRIVER_DESC "Matrox G200/G400"
#define DRIVER_DATE "20021029" #define DRIVER_DATE "20050607"
#define DRIVER_MAJOR 3 #define DRIVER_MAJOR 3
#define DRIVER_MINOR 1 #define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
typedef struct drm_mga_primary_buffer { typedef struct drm_mga_primary_buffer {
...@@ -87,9 +87,43 @@ typedef struct drm_mga_private { ...@@ -87,9 +87,43 @@ typedef struct drm_mga_private {
int chipset; int chipset;
int usec_timeout; int usec_timeout;
/**
* If set, the new DMA initialization sequence was used. This is
 * primarily used to select how the driver should uninitialize its
* internal DMA structures.
*/
int used_new_dma_init;
/**
* If AGP memory is used for DMA buffers, this will be the value
* \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
*/
u32 dma_access;
/**
* If AGP memory is used for DMA buffers, this will be the value
* \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
* transfer).
*/
u32 wagp_enable;
/**
* \name MMIO region parameters.
*
* \sa drm_mga_private_t::mmio
*/
/*@{*/
u32 mmio_base; /**< Bus address of base of MMIO. */
u32 mmio_size; /**< Size of the MMIO region. */
/*@}*/
u32 clear_cmd; u32 clear_cmd;
u32 maccess; u32 maccess;
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
unsigned int fb_cpp; unsigned int fb_cpp;
unsigned int front_offset; unsigned int front_offset;
unsigned int front_pitch; unsigned int front_pitch;
...@@ -108,35 +142,43 @@ typedef struct drm_mga_private { ...@@ -108,35 +142,43 @@ typedef struct drm_mga_private {
drm_local_map_t *status; drm_local_map_t *status;
drm_local_map_t *warp; drm_local_map_t *warp;
drm_local_map_t *primary; drm_local_map_t *primary;
drm_local_map_t *buffers;
drm_local_map_t *agp_textures; drm_local_map_t *agp_textures;
DRM_AGP_MEM *agp_mem;
unsigned int agp_pages;
} drm_mga_private_t; } drm_mga_private_t;
/* mga_dma.c */ /* mga_dma.c */
extern int mga_dma_init( DRM_IOCTL_ARGS ); extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
extern int mga_dma_flush( DRM_IOCTL_ARGS ); extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
extern int mga_dma_reset( DRM_IOCTL_ARGS ); extern int mga_dma_init(DRM_IOCTL_ARGS);
extern int mga_dma_buffers( DRM_IOCTL_ARGS ); extern int mga_dma_flush(DRM_IOCTL_ARGS);
extern void mga_driver_pretakedown(drm_device_t *dev); extern int mga_dma_reset(DRM_IOCTL_ARGS);
extern int mga_driver_dma_quiescent(drm_device_t *dev); extern int mga_dma_buffers(DRM_IOCTL_ARGS);
extern int mga_driver_postcleanup(drm_device_t * dev);
extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv ); extern void mga_driver_pretakedown(drm_device_t * dev);
extern int mga_driver_dma_quiescent(drm_device_t * dev);
extern void mga_do_dma_flush( drm_mga_private_t *dev_priv );
extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ); extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv );
extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ); extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
/* mga_warp.c */ /* mga_warp.c */
extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv ); extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
extern int mga_warp_init( drm_mga_private_t *dev_priv ); extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ); /* mga_irq.c */
extern void mga_driver_irq_preinstall( drm_device_t *dev ); extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
extern void mga_driver_irq_postinstall( drm_device_t *dev ); extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern void mga_driver_irq_uninstall( drm_device_t *dev ); extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(drm_device_t * dev);
extern void mga_driver_irq_postinstall(drm_device_t * dev);
extern void mga_driver_irq_uninstall(drm_device_t * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg); unsigned long arg);
...@@ -527,6 +569,12 @@ do { \ ...@@ -527,6 +569,12 @@ do { \
*/ */
#define MGA_EXEC 0x0100 #define MGA_EXEC 0x0100
/* AGP PLL encoding (for G200 only).
*/
#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)
/* Warp registers /* Warp registers
*/ */
#define MGA_WR0 0x2d00 #define MGA_WR0 0x2d00
......
...@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd, ...@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
} }
typedef struct drm_mga_drm_bootstrap32 {
u32 texture_handle;
u32 texture_size;
u32 primary_size;
u32 secondary_bin_count;
u32 secondary_bin_size;
u32 agp_mode;
u8 agp_size;
} drm_mga_dma_bootstrap32_t;
static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_dma_bootstrap32_t dma_bootstrap32;
drm_mga_dma_bootstrap_t __user *dma_bootstrap;
int err;
if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
sizeof(dma_bootstrap32)))
return -EFAULT;
dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
|| __put_user(dma_bootstrap32.texture_handle,
&dma_bootstrap->texture_handle)
|| __put_user(dma_bootstrap32.texture_size,
&dma_bootstrap->texture_size)
|| __put_user(dma_bootstrap32.primary_size,
&dma_bootstrap->primary_size)
|| __put_user(dma_bootstrap32.secondary_bin_count,
&dma_bootstrap->secondary_bin_count)
|| __put_user(dma_bootstrap32.secondary_bin_size,
&dma_bootstrap->secondary_bin_size)
|| __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
|| __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
return -EFAULT;
err = drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_MGA_DMA_BOOTSTRAP,
(unsigned long)dma_bootstrap);
if (err)
return err;
if (__get_user(dma_bootstrap32.texture_handle,
&dma_bootstrap->texture_handle)
|| __get_user(dma_bootstrap32.texture_size,
&dma_bootstrap->texture_size)
|| __get_user(dma_bootstrap32.primary_size,
&dma_bootstrap->primary_size)
|| __get_user(dma_bootstrap32.secondary_bin_count,
&dma_bootstrap->secondary_bin_count)
|| __get_user(dma_bootstrap32.secondary_bin_size,
&dma_bootstrap->secondary_bin_size)
|| __get_user(dma_bootstrap32.agp_mode,
&dma_bootstrap->agp_mode)
|| __get_user(dma_bootstrap32.agp_size,
&dma_bootstrap->agp_size))
return -EFAULT;
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
sizeof(dma_bootstrap32)))
return -EFAULT;
return 0;
}
drm_ioctl_compat_t *mga_compat_ioctls[] = { drm_ioctl_compat_t *mga_compat_ioctls[] = {
[DRM_MGA_INIT] = compat_mga_init, [DRM_MGA_INIT] = compat_mga_init,
[DRM_MGA_GETPARAM] = compat_mga_getparam, [DRM_MGA_GETPARAM] = compat_mga_getparam,
[DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
}; };
/** /**
......
...@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ) ...@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS )
drm_mga_private_t *dev_priv = drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private; (drm_mga_private_t *)dev->dev_private;
int status; int status;
int handled = 0;
status = MGA_READ( MGA_STATUS ); status = MGA_READ(MGA_STATUS);
/* VBLANK interrupt */ /* VBLANK interrupt */
if ( status & MGA_VLINEPEN ) { if ( status & MGA_VLINEPEN ) {
MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR ); MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
atomic_inc(&dev->vbl_received); atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue); DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals( dev ); drm_vbl_send_signals(dev);
handled = 1;
}
/* SOFTRAP interrupt */
if (status & MGA_SOFTRAPEN) {
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
MGA_WRITE(MGA_PRIMEND, prim_end);
}
atomic_inc(&dev_priv->last_fence_retired);
DRM_WAKEUP(&dev_priv->fence_queue);
handled = 1;
}
if ( handled ) {
return IRQ_HANDLED; return IRQ_HANDLED;
} }
return IRQ_NONE; return IRQ_NONE;
...@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) ...@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
return ret; return ret;
} }
void mga_driver_irq_preinstall( drm_device_t *dev ) { int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
drm_mga_private_t *dev_priv = {
(drm_mga_private_t *)dev->dev_private; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
int ret = 0;
/* Assume that the user has missed the current sequence number
 * by about a day rather than that she wants to wait for years
* using fences.
*/
DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- *sequence) <= (1 << 23)));
*sequence = cur_fence;
return ret;
}
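The DRM_WAIT_ON condition above relies on unsigned wrap-around arithmetic: a fence counts as retired once the counter is no more than 2^23 sequence numbers past it, even across a 32-bit overflow. A stand-alone illustration of the same comparison (hypothetical values, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* Non-zero when `retired` has caught up with `target`, tolerating
	 * 32-bit wrap-around -- the same test used in the wait above. */
	static int fence_passed(uint32_t retired, uint32_t target)
	{
		return (retired - target) <= (1u << 23);
	}

	int main(void)
	{
		printf("%d\n", fence_passed(5u, 0xfffffff0u));	/* 1: wrapped, passed */
		printf("%d\n", fence_passed(100u, 200u));	/* 0: not yet retired */
		return 0;
	}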
void mga_driver_irq_preinstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Disable *all* interrupts */ /* Disable *all* interrupts */
MGA_WRITE( MGA_IEN, 0 ); MGA_WRITE( MGA_IEN, 0 );
...@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) { ...@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) {
MGA_WRITE( MGA_ICLEAR, ~0 ); MGA_WRITE( MGA_ICLEAR, ~0 );
} }
void mga_driver_irq_postinstall( drm_device_t *dev ) { void mga_driver_irq_postinstall(drm_device_t * dev)
drm_mga_private_t *dev_priv = {
(drm_mga_private_t *)dev->dev_private; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Turn on VBL interrupt */ DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
MGA_WRITE( MGA_IEN, MGA_VLINEIEN );
/* Turn on vertical blank interrupt and soft trap interrupt. */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
} }
void mga_driver_irq_uninstall( drm_device_t *dev ) { void mga_driver_irq_uninstall( drm_device_t *dev ) {
...@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) { ...@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) {
return; return;
/* Disable *all* interrupts */ /* Disable *all* interrupts */
MGA_WRITE( MGA_IEN, 0 ); MGA_WRITE(MGA_IEN, 0);
dev->irq_enabled = 0;
} }
...@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv, ...@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
/* Force reset of DWGCTL on G400 (eliminates clip disable bit). /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
*/ */
if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
DMA_BLOCK( MGA_DWGCTL, ctx->dwgctl, DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
MGA_LEN + MGA_EXEC, 0x80000000, MGA_LEN + MGA_EXEC, 0x80000000,
MGA_DWGCTL, ctx->dwgctl, MGA_DWGCTL, ctx->dwgctl,
MGA_LEN + MGA_EXEC, 0x80000000 ); MGA_LEN + MGA_EXEC, 0x80000000);
} }
DMA_BLOCK( MGA_DMAPAD, 0x00000000, DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_CXBNDRY, (box->x2 << 16) | box->x1, MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
MGA_YTOP, box->y1 * pitch, MGA_YTOP, box->y1 * pitch,
MGA_YBOT, box->y2 * pitch ); MGA_YBOT, (box->y2 - 1) * pitch);
ADVANCE_DMA(); ADVANCE_DMA();
} }
...@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv ) ...@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
/* Padding required due to a hardware bug. /* Padding required due to a hardware bug.
*/ */
DMA_BLOCK( MGA_DMAPAD, 0xffffffff, DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff,
MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
MGA_WMODE_START | MGA_WMODE_START | dev_priv->wagp_enable));
MGA_WAGP_ENABLE) );
ADVANCE_DMA(); ADVANCE_DMA();
} }
...@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv ) ...@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */ MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */
/* Padding required due to a hardware bug */ /* Padding required due to a hardware bug */
DMA_BLOCK( MGA_DMAPAD, 0xffffffff, DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff,
MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
MGA_WMODE_START | MGA_WMODE_START | dev_priv->wagp_enable));
MGA_WAGP_ENABLE) );
ADVANCE_DMA(); ADVANCE_DMA();
} }
...@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv ) ...@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv )
if ( dirty & MGA_UPLOAD_TEX0 ) if ( dirty & MGA_UPLOAD_TEX0 )
ret |= mga_verify_tex( dev_priv, 0 ); ret |= mga_verify_tex( dev_priv, 0 );
if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
if ( dirty & MGA_UPLOAD_TEX1 ) if (dirty & MGA_UPLOAD_TEX1)
ret |= mga_verify_tex( dev_priv, 1 ); ret |= mga_verify_tex(dev_priv, 1);
if ( dirty & MGA_UPLOAD_PIPE ) if ( dirty & MGA_UPLOAD_PIPE )
ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES ); ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
...@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf ) ...@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf )
BEGIN_DMA( 1 ); BEGIN_DMA( 1 );
DMA_BLOCK( MGA_DMAPAD, 0x00000000, DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000,
MGA_SECADDRESS, (address | MGA_SECADDRESS, (address |
MGA_DMA_VERTEX), MGA_DMA_VERTEX),
MGA_SECEND, ((address + length) | MGA_SECEND, ((address + length) |
MGA_PAGPXFER) ); dev_priv->dma_access));
ADVANCE_DMA(); ADVANCE_DMA();
} while ( ++i < sarea_priv->nbox ); } while ( ++i < sarea_priv->nbox );
...@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf, ...@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf,
BEGIN_DMA( 1 ); BEGIN_DMA( 1 );
DMA_BLOCK( MGA_DMAPAD, 0x00000000, DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000,
MGA_SETUPADDRESS, address + start, MGA_SETUPADDRESS, address + start,
MGA_SETUPEND, ((address + end) | MGA_SETUPEND, ((address + end) |
MGA_PAGPXFER) ); dev_priv->dma_access));
ADVANCE_DMA(); ADVANCE_DMA();
} while ( ++i < sarea_priv->nbox ); } while ( ++i < sarea_priv->nbox );
...@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf, ...@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private;
drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
u32 y2; u32 y2;
DMA_LOCALS; DMA_LOCALS;
DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used );
...@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS ) ...@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS )
case MGA_PARAM_IRQ_NR: case MGA_PARAM_IRQ_NR:
value = dev->irq; value = dev->irq;
break; break;
case MGA_PARAM_CARD_TYPE:
value = dev_priv->chipset;
break;
default: default:
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
...@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS ) ...@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS )
return 0; return 0;
} }
static int mga_set_fence(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
u32 temp;
DMA_LOCALS;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
/* I would normally do this assignment in the declaration of temp,
* but dev_priv may be NULL.
*/
temp = dev_priv->next_fence_to_post;
dev_priv->next_fence_to_post++;
BEGIN_DMA(1);
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_SOFTRAP, 0x00000000);
ADVANCE_DMA();
if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
}
return 0;
}
static int mga_wait_fence(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
u32 fence;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
mga_driver_fence_wait(dev, & fence);
if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
}
return 0;
}
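Taken together, the two fence ioctls give user space a lightweight completion handle; a minimal hedged sketch (assumes an open DRM fd, error handling elided):

	uint32_t fence;

	/* Queue a SOFTRAP in the primary DMA stream; the kernel returns
	 * the fence number it assigned. */
	ioctl(fd, DRM_IOCTL_MGA_SET_FENCE, &fence);

	/* ... submit more work here if desired ... */

	/* Block (up to the kernel's timeout) until that fence retires;
	 * the kernel writes back the last retired fence number. */
	ioctl(fd, DRM_IOCTL_MGA_WAIT_FENCE, &fence);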
drm_ioctl_desc_t mga_ioctls[] = { drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_MGA_INIT)] = { mga_dma_init, 1, 1 }, [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_RESET)] = { mga_dma_reset, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam, 1, 0 }, [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
}; };
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
...@@ -48,65 +48,52 @@ do { \ ...@@ -48,65 +48,52 @@ do { \
vcbase += WARP_UCODE_SIZE( which ); \ vcbase += WARP_UCODE_SIZE( which ); \
} while (0) } while (0)
static const unsigned int mga_warp_g400_microcode_size =
static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv ) (WARP_UCODE_SIZE(warp_g400_tgz) +
{ WARP_UCODE_SIZE(warp_g400_tgza) +
unsigned int size; WARP_UCODE_SIZE(warp_g400_tgzaf) +
WARP_UCODE_SIZE(warp_g400_tgzf) +
size = ( WARP_UCODE_SIZE( warp_g400_tgz ) + WARP_UCODE_SIZE(warp_g400_tgzs) +
WARP_UCODE_SIZE( warp_g400_tgza ) + WARP_UCODE_SIZE(warp_g400_tgzsa) +
WARP_UCODE_SIZE( warp_g400_tgzaf ) + WARP_UCODE_SIZE(warp_g400_tgzsaf) +
WARP_UCODE_SIZE( warp_g400_tgzf ) + WARP_UCODE_SIZE(warp_g400_tgzsf) +
WARP_UCODE_SIZE( warp_g400_tgzs ) + WARP_UCODE_SIZE(warp_g400_t2gz) +
WARP_UCODE_SIZE( warp_g400_tgzsa ) + WARP_UCODE_SIZE(warp_g400_t2gza) +
WARP_UCODE_SIZE( warp_g400_tgzsaf ) + WARP_UCODE_SIZE(warp_g400_t2gzaf) +
WARP_UCODE_SIZE( warp_g400_tgzsf ) + WARP_UCODE_SIZE(warp_g400_t2gzf) +
WARP_UCODE_SIZE( warp_g400_t2gz ) + WARP_UCODE_SIZE(warp_g400_t2gzs) +
WARP_UCODE_SIZE( warp_g400_t2gza ) + WARP_UCODE_SIZE(warp_g400_t2gzsa) +
WARP_UCODE_SIZE( warp_g400_t2gzaf ) + WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
WARP_UCODE_SIZE( warp_g400_t2gzf ) + WARP_UCODE_SIZE(warp_g400_t2gzsf));
WARP_UCODE_SIZE( warp_g400_t2gzs ) +
WARP_UCODE_SIZE( warp_g400_t2gzsa ) + static const unsigned int mga_warp_g200_microcode_size =
WARP_UCODE_SIZE( warp_g400_t2gzsaf ) + (WARP_UCODE_SIZE(warp_g200_tgz) +
WARP_UCODE_SIZE( warp_g400_t2gzsf ) ); WARP_UCODE_SIZE(warp_g200_tgza) +
WARP_UCODE_SIZE(warp_g200_tgzaf) +
size = PAGE_ALIGN( size ); WARP_UCODE_SIZE(warp_g200_tgzf) +
WARP_UCODE_SIZE(warp_g200_tgzs) +
DRM_DEBUG( "G400 ucode size = %d bytes\n", size ); WARP_UCODE_SIZE(warp_g200_tgzsa) +
return size; WARP_UCODE_SIZE(warp_g200_tgzsaf) +
} WARP_UCODE_SIZE(warp_g200_tgzsf));
static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv )
unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
{ {
unsigned int size; switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400:
size = ( WARP_UCODE_SIZE( warp_g200_tgz ) + case MGA_CARD_TYPE_G550:
WARP_UCODE_SIZE( warp_g200_tgza ) + return PAGE_ALIGN(mga_warp_g400_microcode_size);
WARP_UCODE_SIZE( warp_g200_tgzaf ) + case MGA_CARD_TYPE_G200:
WARP_UCODE_SIZE( warp_g200_tgzf ) + return PAGE_ALIGN(mga_warp_g200_microcode_size);
WARP_UCODE_SIZE( warp_g200_tgzs ) + default:
WARP_UCODE_SIZE( warp_g200_tgzsa ) + return 0;
WARP_UCODE_SIZE( warp_g200_tgzsaf ) + }
WARP_UCODE_SIZE( warp_g200_tgzsf ) );
size = PAGE_ALIGN( size );
DRM_DEBUG( "G200 ucode size = %d bytes\n", size );
return size;
} }
static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv ) static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
{ {
unsigned char *vcbase = dev_priv->warp->handle; unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset; unsigned long pcbase = dev_priv->warp->offset;
unsigned int size;
size = mga_warp_g400_microcode_size( dev_priv );
if ( size > dev_priv->warp->size ) {
DRM_ERROR( "microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size );
return DRM_ERR(ENOMEM);
}
memset( dev_priv->warp_pipe_phys, 0, memset( dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys) ); sizeof(dev_priv->warp_pipe_phys) );
...@@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv ) ...@@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
{ {
unsigned char *vcbase = dev_priv->warp->handle; unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset; unsigned long pcbase = dev_priv->warp->offset;
unsigned int size;
size = mga_warp_g200_microcode_size( dev_priv );
if ( size > dev_priv->warp->size ) {
DRM_ERROR( "microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size );
return DRM_ERR(ENOMEM);
}
memset( dev_priv->warp_pipe_phys, 0, memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
sizeof(dev_priv->warp_pipe_phys) );
WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ ); WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF ); WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA ); WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF ); WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS ); WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF ); WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA ); WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF ); WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
return 0; return 0;
} }
int mga_warp_install_microcode( drm_mga_private_t *dev_priv ) int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
{ {
switch ( dev_priv->chipset ) { const unsigned int size = mga_warp_microcode_size(dev_priv);
DRM_DEBUG("MGA ucode size = %d bytes\n", size);
if (size > dev_priv->warp->size) {
DRM_ERROR("microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size);
return DRM_ERR(ENOMEM);
}
switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G400:
return mga_warp_install_g400_microcode( dev_priv ); case MGA_CARD_TYPE_G550:
return mga_warp_install_g400_microcode(dev_priv);
case MGA_CARD_TYPE_G200: case MGA_CARD_TYPE_G200:
return mga_warp_install_g200_microcode( dev_priv ); return mga_warp_install_g200_microcode( dev_priv );
default: default:
...@@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv ) ...@@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv )
*/ */
switch ( dev_priv->chipset ) { switch ( dev_priv->chipset ) {
case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G400:
MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND ); case MGA_CARD_TYPE_G550:
MGA_WRITE( MGA_WGETMSB, 0x00000E00 ); MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
MGA_WRITE( MGA_WVRTXSZ, 0x00001807 ); MGA_WRITE(MGA_WGETMSB, 0x00000E00);
MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 ); MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
break; break;
case MGA_CARD_TYPE_G200: case MGA_CARD_TYPE_G200:
MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND ); MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );
......
...@@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev, ...@@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
ring_start = dev_priv->cce_ring->offset - dev->agp->base; ring_start = dev_priv->cce_ring->offset - dev->agp->base;
else else
#endif #endif
ring_start = dev_priv->cce_ring->offset - dev->sg->handle; ring_start = dev_priv->cce_ring->offset -
(unsigned long)dev->sg->virtual;
R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
...@@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
r128_do_cleanup_cce( dev ); r128_do_cleanup_cce( dev );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if(!dev->agp_buffer_map) { if(!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n"); DRM_ERROR("could not find dma buffer region!\n");
...@@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
dev_priv->cce_buffers_offset = dev->agp->base; dev_priv->cce_buffers_offset = dev->agp->base;
else else
#endif #endif
dev_priv->cce_buffers_offset = dev->sg->handle; dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
......
...@@ -215,7 +215,7 @@ typedef struct drm_r128_sarea { ...@@ -215,7 +215,7 @@ typedef struct drm_r128_sarea {
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) #define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) #define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) #define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
#define DRM_IOCTL_R128_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) #define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) #define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
typedef struct drm_r128_init { typedef struct drm_r128_init {
......
/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
*
* Copyright (C) The Weather Channel, Inc. 2002.
* Copyright (C) 2004 Nicolai Haehnle.
* All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
*/
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
#define R300_SIMULTANEOUS_CLIPRECTS 4
/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
*/
static const int r300_cliprect_cntl[4] = {
0xAAAA,
0xEEEE,
0xFEFE,
0xFFFE
};
/**
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
* buffer, starting with index n.
*/
static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
int n)
{
drm_clip_rect_t box;
int nr;
int i;
RING_LOCALS;
nr = cmdbuf->nbox - n;
if (nr > R300_SIMULTANEOUS_CLIPRECTS)
nr = R300_SIMULTANEOUS_CLIPRECTS;
DRM_DEBUG("%i cliprects\n", nr);
if (nr) {
BEGIN_RING(6 + nr*2);
OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
for(i = 0; i < nr; ++i) {
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return DRM_ERR(EFAULT);
}
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
}
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
/* TODO/SECURITY: Force scissors to a safe value, otherwise the
* client might be able to trample over memory.
* The impact should be very limited, but I'd rather be safe than
* sorry.
*/
OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
OUT_RING( 0 );
OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
ADVANCE_RING();
} else {
/* Why we allow zero cliprect rendering:
* There are some commands in a command buffer that must be submitted
* even when there are no cliprects, e.g. DMA buffer discard
* or state setting (though state setting could be avoided by
* simulating a loss of context).
*
* Now, since the cmdbuf interface is so chaotic (and is
* bound to remain that way for a bit until things settle down),
* it is basically impossible to filter out the commands that are
* necessary and those that aren't.
*
* So I choose the safe way and don't do any filtering at all;
* instead, I simply set up the engine so that all rendering
* can't produce any fragments.
*/
BEGIN_RING(2);
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
ADVANCE_RING();
}
return 0;
}
u8 r300_reg_flags[0x10000>>2];
void r300_init_reg_flags(void)
{
int i;
memset(r300_reg_flags, 0, 0x10000>>2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
r300_reg_flags[i]|=(mark);
#define MARK_SAFE 1
#define MARK_CHECK_OFFSET 2
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
ADD_RANGE(0x2080, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
ADD_RANGE(0x2140, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
ADD_RANGE(0x221C, 1);
ADD_RANGE(0x2220, 4);
ADD_RANGE(0x2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
ADD_RANGE(0x4238, 1);
ADD_RANGE(0x4260, 3);
ADD_RANGE(0x4274, 4);
ADD_RANGE(0x4288, 5);
ADD_RANGE(0x42A0, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
ADD_RANGE(0x42B4, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
ADD_RANGE(R300_RS_INTERP_0, 8);
ADD_RANGE(R300_RS_ROUTE_0, 8);
ADD_RANGE(0x43A4, 2);
ADD_RANGE(0x43E8, 1);
ADD_RANGE(R300_PFS_CNTL_0, 3);
ADD_RANGE(R300_PFS_NODE_0, 4);
ADD_RANGE(R300_PFS_TEXI_0, 64);
ADD_RANGE(0x46A4, 5);
ADD_RANGE(R300_PFS_INSTR0_0, 64);
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(0x4BC0, 1);
ADD_RANGE(0x4BC8, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(0x4E10, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
ADD_RANGE(0x4F10, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);
ADD_RANGE(0x4F30, 2);
ADD_RANGE(0x4F44, 1);
ADD_RANGE(0x4F54, 1);
ADD_RANGE(R300_TX_FILTER_0, 16);
ADD_RANGE(R300_TX_UNK1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
ADD_RANGE(R300_TX_UNK4_0, 16);
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
ADD_RANGE(0x4f18, 1);
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
}
static __inline__ int r300_check_range(unsigned reg, int count)
{
int i;
if(reg & ~0xffff)return -1;
for(i=(reg>>2);i<(reg>>2)+count;i++)
if(r300_reg_flags[i]!=MARK_SAFE)return 1;
return 0;
}
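/* Illustration (an aside, not compiled): each 32-bit register maps to one
 * byte of r300_reg_flags via reg >> 2, so once r300_init_reg_flags() has
 * run, plain state registers pass r300_check_range() unchanged, while the
 * MARK_CHECK_OFFSET ones force the careful per-value path further below:
 */
#if 0
BUG_ON(r300_reg_flags[R300_SE_VPORT_XSCALE >> 2] != MARK_SAFE);
BUG_ON(r300_reg_flags[R300_RB3D_DEPTHOFFSET >> 2] != MARK_CHECK_OFFSET);
BUG_ON(r300_check_range(R300_SE_VPORT_XSCALE, 6) != 0);	/* all safe */
#endif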
/* we expect offsets passed to the framebuffer to be either within video memory or
within AGP space */
static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
{
/* We really want to check against the end of the video aperture,
but that value is not being kept around.
This code is correct for now (it does the same thing as the
code that sets MC_FB_LOCATION in radeon_cp.c). */
if((offset>=dev_priv->fb_location) &&
(offset<dev_priv->gart_vm_start))return 0;
if((offset>=dev_priv->gart_vm_start) &&
(offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
return 1;
}
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
int sz;
int i;
int values[64];
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if((sz>64)||(sz<0)){
DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
return DRM_ERR(EINVAL);
}
for(i=0;i<sz;i++){
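/* Note: this dereferences the __user pointer directly rather than going
 * through a copy_from_user()-style accessor; presumably it relies on the
 * ioctl handler having verified the buffer beforehand. */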
values[i]=((int __user*)cmdbuf->buf)[i];
switch(r300_reg_flags[(reg>>2)+i]){
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
if(r300_check_offset(dev_priv, (u32)values[i])){
DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
return DRM_ERR(EINVAL);
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
return DRM_ERR(EINVAL);
}
}
BEGIN_RING(1+sz);
OUT_RING( CP_PACKET0( reg, sz-1 ) );
OUT_RING_TABLE( values, sz );
ADVANCE_RING();
cmdbuf->buf += sz*4;
cmdbuf->bufsz -= sz*4;
return 0;
}
/**
* Emits a packet0 setting arbitrary registers.
* Called by r300_do_cp_cmdbuf.
*
* Note that checks are performed on contents and addresses of the registers
*/
static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
int sz;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if (!sz)
return 0;
if (sz*4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
if (reg+sz*4 >= 0x10000){
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
return DRM_ERR(EINVAL);
}
if(r300_check_range(reg, sz)){
/* go and check everything */
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
}
/* the rest of the data is safe to emit, whatever the values the user passed */
BEGIN_RING(1+sz);
OUT_RING( CP_PACKET0( reg, sz-1 ) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
ADVANCE_RING();
cmdbuf->buf += sz*4;
cmdbuf->bufsz -= sz*4;
return 0;
}
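/* For orientation, a sketch of what userspace presumably does on its side
 * (an assumption based purely on the (reghi << 8) | reglo decode above:
 * reglo carries the low 8 bits of the register address, reghi the rest):
 */
#if 0
/* R300_SE_VPORT_XSCALE (0x1D98) travels as reghi = 0x1D, reglo = 0x98: */
int reg = (0x1D << 8) | 0x98;	/* == 0x1D98 */
#endif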
/**
* Uploads user-supplied vertex program instructions or parameters onto
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
int addr;
RING_LOCALS;
sz = header.vpu.count;
addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
if (!sz)
return 0;
if (sz*16 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
BEGIN_RING(5+sz*4);
/* Wait for the VAP to come to its senses. */
/* There is no need to emit this multiple times (only once before the VAP
is programmed), but that optimization is left for later. */
OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
ADVANCE_RING();
cmdbuf->buf += sz*16;
cmdbuf->bufsz -= sz*16;
return 0;
}
/**
* Emit a clear packet from userspace.
* Called by r300_emit_packet3.
*/
static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
{
RING_LOCALS;
if (8*4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
BEGIN_RING(10);
OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
(1<<R300_PRIM_NUM_VERTICES_SHIFT) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
ADVANCE_RING();
cmdbuf->buf += 8*4;
cmdbuf->bufsz -= 8*4;
return 0;
}
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
u32 header)
{
int count, i,k;
#define MAX_ARRAY_PACKET 64
u32 payload[MAX_ARRAY_PACKET];
u32 narrays;
RING_LOCALS;
count=(header>>16) & 0x3fff;
if((count+1)>MAX_ARRAY_PACKET){
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
return DRM_ERR(EINVAL);
}
memset(payload, 0, MAX_ARRAY_PACKET*4);
memcpy(payload, cmdbuf->buf+4, (count+1)*4);
/* carefully check packet contents */
narrays=payload[0];
k=0;
i=1;
while((k<narrays) && (i<(count+1))){
i++; /* skip attribute field */
if(r300_check_offset(dev_priv, payload[i])){
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
return DRM_ERR(EINVAL);
}
k++;
i++;
if(k==narrays)break;
/* have one more to process, they come in pairs */
if(r300_check_offset(dev_priv, payload[i])){
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
return DRM_ERR(EINVAL);
}
k++;
i++;
}
/* do the counts match what we expect ? */
if((k!=narrays) || (i!=(count+1))){
DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
return DRM_ERR(EINVAL);
}
/* all clear, output packet */
BEGIN_RING(count+2);
OUT_RING(header);
OUT_RING_TABLE(payload, count+1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
return 0;
}
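/* An illustration of the payload layout the checking loop above implies
 * (assuming the packet body matches what fglrx emits): one attribute word
 * per *pair* of arrays, each followed by one or two GPU offsets; e.g. for
 * narrays = 3:
 *
 *	payload[0] = 3 (narrays)
 *	payload[1] = attribute word for arrays 0 and 1
 *	payload[2] = offset of array 0	(range-checked above)
 *	payload[3] = offset of array 1	(range-checked above)
 *	payload[4] = attribute word for array 2
 *	payload[5] = offset of array 2	(range-checked above)
 *
 * so count = 5 and the full packet is count + 2 = 7 DWORDs with the header.
 */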
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
{
u32 header;
int count;
RING_LOCALS;
if (4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
header = *(u32 __user*)cmdbuf->buf;
/* Is it packet 3 ? */
if( (header>>30)!=0x3 ) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
}
count=(header>>16) & 0x3fff;
/* Check again now that we know how much data to expect */
if ((count+2)*4 > cmdbuf->bufsz){
DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
(count+2)*4, cmdbuf->bufsz);
return DRM_ERR(EINVAL);
}
/* Is it a packet type we know about ? */
switch(header & 0xff00){
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
break;
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
}
BEGIN_RING(count+2);
OUT_RING(header);
OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
return 0;
}
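/* For reference, the header layout the checks above decode: bits 31:30 must
 * be 3, bits 29:16 hold count (number of payload DWORDs minus one), and
 * bits 15:8 hold the opcode. A minimal sketch, mirroring the
 * CP_PACKET3(RADEON_CP_NOP, 0) that r300_pacify() emits below:
 */
#if 0
u32 header = (0x3 << 30) | (0 << 16) | RADEON_CP_NOP;	/* header + 1 payload DWORD */
#endif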
/**
* Emit a rendering packet3 from userspace.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int n;
int ret;
char __user* orig_buf = cmdbuf->buf;
int orig_bufsz = cmdbuf->bufsz;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
*/
n = 0;
do {
if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
if (ret)
return ret;
cmdbuf->buf = orig_buf;
cmdbuf->bufsz = orig_bufsz;
}
switch(header.packet3.packet) {
case R300_CMD_PACKET3_CLEAR:
DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
ret = r300_emit_clear(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_clear failed\n");
return ret;
}
break;
case R300_CMD_PACKET3_RAW:
DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_raw_packet3 failed\n");
return ret;
}
break;
default:
DRM_ERROR("bad packet3 type %i at %p\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
return DRM_ERR(EINVAL);
}
n += R300_SIMULTANEOUS_CLIPRECTS;
} while(n < cmdbuf->nbox);
return 0;
}
/* Some of the R300 chips seem to be extremely touchy about the two registers
* that are configured in r300_pacify.
* Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
* sends a command buffer that contains only state setting commands and a
* vertex program/parameter upload sequence, this will eventually lead to a
* lockup, unless the sequence is bracketed by calls to r300_pacify.
* So we should take great care to *always* call r300_pacify before
* *anything* 3D related, and again afterwards. This is what the
* call bracket in r300_do_cp_cmdbuf is for.
*/
/**
* Emit the sequence to pacify R300.
*/
static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
{
RING_LOCALS;
BEGIN_RING(6);
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
OUT_RING( 0xa );
OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
OUT_RING( 0x3 );
OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
OUT_RING( 0x0 );
ADVANCE_RING();
}
/**
* Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
buf->pending = 1;
buf->used = 0;
}
/**
* Parses and validates a user-supplied command buffer and emits appropriate
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
int r300_do_cp_cmdbuf(drm_device_t* dev,
DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
DRM_DEBUG("\n");
/* See the comment above r300_emit_begin3d for why this call must be here,
* and what the cleanup gotos are for. */
r300_pacify(dev_priv);
if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
if (ret)
goto cleanup;
}
while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
int idx;
drm_r300_cmd_header_t header;
header.u = *(unsigned int *)cmdbuf->buf;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
switch(header.header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
}
break;
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
ret = r300_emit_vpu(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
}
break;
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
ret = r300_emit_packet3(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
}
break;
case R300_CMD_END3D:
DRM_DEBUG("R300_CMD_END3D\n");
/* TODO:
Ideally, the userspace driver should not need to issue this call,
i.e. the drm driver should issue it automatically and prevent
lockups.
In practice, we do not understand why this call is needed and what
it does (except for some vague guesses that it has to do with cache
coherence), and so the userspace driver does it.
Once we are sure which uses prevent lockups, the code could be moved
into the kernel and the userspace driver would no longer
need to use this command.
Note that issuing this command does not hurt anything
except, possibly, performance */
r300_pacify(dev_priv);
break;
case R300_CMD_CP_DELAY:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_CP_DELAY\n");
{
int i;
RING_LOCALS;
BEGIN_RING(header.delay.count);
for(i=0;i<header.delay.count;i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
break;
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
emit_dispatch_age = 1;
r300_discard_buffer(dev, buf);
break;
case R300_CMD_WAIT:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_WAIT\n");
if(header.wait.flags==0)break; /* nothing to do */
{
RING_LOCALS;
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
OUT_RING( (header.wait.flags & 0xf)<<14 );
ADVANCE_RING();
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
ret = DRM_ERR(EINVAL);
goto cleanup;
}
}
DRM_DEBUG("END\n");
cleanup:
r300_pacify(dev_priv);
/* We emit the vertex buffer age here, outside the pacifier "brackets"
* for two reasons:
* (1) This may coalesce multiple age emissions into a single one and
* (2) more importantly, some chips lock up hard when scratch registers
* are written inside the pacifier bracket.
*/
if (emit_dispatch_age) {
RING_LOCALS;
/* Emit the vertex buffer age */
BEGIN_RING(2);
RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
ADVANCE_RING();
}
COMMIT_RING();
return ret;
}
/**************************************************************************
Copyright (C) 2004-2005 Nicolai Haehnle et al.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#ifndef _R300_REG_H
#define _R300_REG_H
#define R300_MC_INIT_MISC_LAT_TIMER 0x180
# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
#define R300_MC_INIT_GFX_LAT_TIMER 0x154
# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
/*
This file contains registers and constants for the R300. They have been
found mostly by examining command buffers captured using glxtest, as well
as by extrapolating some known registers and constants from the R200.
I am fairly certain that they are correct unless stated otherwise in comments.
*/
#define R300_SE_VPORT_XSCALE 0x1D98
#define R300_SE_VPORT_XOFFSET 0x1D9C
#define R300_SE_VPORT_YSCALE 0x1DA0
#define R300_SE_VPORT_YOFFSET 0x1DA4
#define R300_SE_VPORT_ZSCALE 0x1DA8
#define R300_SE_VPORT_ZOFFSET 0x1DAC
/* This register is written directly and also starts data section in many 3d CP_PACKET3's */
#define R300_VAP_VF_CNTL 0x2084
# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
/* State based - direct writes to registers trigger vertex generation */
# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
/* I don't think I saw these three used.. */
# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
/* index size - when not set the indices are assumed to be 16 bit */
# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
/* number of vertices */
# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
/* BEGIN: Wild guesses */
#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
/* END */
#define R300_SE_VTE_CNTL 0x20b0
# define R300_VPORT_X_SCALE_ENA 0x00000001
# define R300_VPORT_X_OFFSET_ENA 0x00000002
# define R300_VPORT_Y_SCALE_ENA 0x00000004
# define R300_VPORT_Y_OFFSET_ENA 0x00000008
# define R300_VPORT_Z_SCALE_ENA 0x00000010
# define R300_VPORT_Z_OFFSET_ENA 0x00000020
# define R300_VTX_XY_FMT 0x00000100
# define R300_VTX_Z_FMT 0x00000200
# define R300_VTX_W0_FMT 0x00000400
# define R300_VTX_W0_NORMALIZE 0x00000800
# define R300_VTX_ST_DENORMALIZED 0x00001000
/* BEGIN: Vertex data assembly - lots of uncertainties */
/* gap */
/* Where do we get our vertex data?
//
// Vertex data either comes either from immediate mode registers or from
// vertex arrays.
// There appears to be no mixed mode (though we can force the pitch of
// vertex arrays to 0, effectively reusing the same element over and over
// again).
//
// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
// if these registers influence vertex array processing.
//
// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
//
// In both cases, vertex attributes are then passed through INPUT_ROUTE.
// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
// into the vertex processor's input registers.
// The first word routes the first input, the second word the second, etc.
// The corresponding input is routed into the register with the given index.
// The list is ended by a word with INPUT_ROUTE_END set.
//
// Always set COMPONENTS_4 in immediate mode. */
#define R300_VAP_INPUT_ROUTE_0_0 0x2150
# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
# define R300_VAP_INPUT_ROUTE_END (1 << 13)
# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
#define R300_VAP_INPUT_ROUTE_0_1 0x2154
#define R300_VAP_INPUT_ROUTE_0_2 0x2158
#define R300_VAP_INPUT_ROUTE_0_3 0x215C
#define R300_VAP_INPUT_ROUTE_0_4 0x2160
#define R300_VAP_INPUT_ROUTE_0_5 0x2164
#define R300_VAP_INPUT_ROUTE_0_6 0x2168
#define R300_VAP_INPUT_ROUTE_0_7 0x216C
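/* A hedged example (several of the bits above are marked GUESS): a single
 * 4-component float attribute routed to vertex-program input register 0
 * would then be the one-word route list
 */
#if 0
u32 route0 = R300_INPUT_ROUTE_COMPONENTS_4
	   | (0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT)
	   | R300_INPUT_ROUTE_FLOAT
	   | R300_VAP_INPUT_ROUTE_END;	/* last (and only) entry */
#endif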
/* gap */
/* Notes:
// - always set up to produce at least two attributes:
// if vertex program uses only position, fglrx will set normal, too
// - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
#define R300_VAP_INPUT_CNTL_0 0x2180
# define R300_INPUT_CNTL_0_COLOR 0x00000001
#define R300_VAP_INPUT_CNTL_1 0x2184
# define R300_INPUT_CNTL_POS 0x00000001
# define R300_INPUT_CNTL_NORMAL 0x00000002
# define R300_INPUT_CNTL_COLOR 0x00000004
# define R300_INPUT_CNTL_TC0 0x00000400
# define R300_INPUT_CNTL_TC1 0x00000800
# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
/* gap */
/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
// are set to a swizzling bit pattern, other words are 0.
//
// In immediate mode, the pattern is always set to xyzw. In vertex array
// mode, the swizzling pattern is e.g. used to set zw components in texture
// coordinates with only two components. */
#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
# define R300_INPUT_ROUTE_SELECT_X 0
# define R300_INPUT_ROUTE_SELECT_Y 1
# define R300_INPUT_ROUTE_SELECT_Z 2
# define R300_INPUT_ROUTE_SELECT_W 3
# define R300_INPUT_ROUTE_SELECT_ZERO 4
# define R300_INPUT_ROUTE_SELECT_ONE 5
# define R300_INPUT_ROUTE_SELECT_MASK 7
# define R300_INPUT_ROUTE_X_SHIFT 0
# define R300_INPUT_ROUTE_Y_SHIFT 3
# define R300_INPUT_ROUTE_Z_SHIFT 6
# define R300_INPUT_ROUTE_W_SHIFT 9
# define R300_INPUT_ROUTE_ENABLE (15 << 12)
#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
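/* Sketch of the identity (xyzw) pattern the comment above describes: */
#if 0
u32 swz_xyzw = (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT)
	     | (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT)
	     | (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT)
	     | (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT)
	     | R300_INPUT_ROUTE_ENABLE;
#endif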
/* END */
/* gap */
/* BEGIN: Upload vertex program and data
// The programmable vertex shader unit has a memory bank of unknown size
// that can be written to in 16 byte units by writing the address into
// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
//
// Pointers into the memory bank are always in multiples of 16 bytes.
//
// The memory bank is divided into areas with fixed meaning.
//
// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
// whereas the difference between known addresses suggests size 512.
//
// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
// Native reported limits and the VPI layout suggest size 256, whereas
// difference between known addresses suggests size 512.
//
// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
// floating point pointsize. The exact purpose of this state is uncertain,
// as there is also the R300_RE_POINTSIZE register.
//
// Multiple vertex programs and parameter sets can be loaded at once,
// which could explain the size discrepancy. */
#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
# define R300_PVS_UPLOAD_PROGRAM 0x00000000
# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
/* gap */
#define R300_VAP_PVS_UPLOAD_DATA 0x2208
/* END */
/* gap */
/* I do not know the purpose of this register. However, I do know that
// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
// for normal rendering. */
#define R300_VAP_UNKNOWN_221C 0x221C
# define R300_221C_NORMAL 0x00000000
# define R300_221C_CLEAR 0x0001C000
/* gap */
/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
// rendering commands and overwriting vertex program parameters.
// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
// avoids bugs caused by still running shaders reading bad data from memory. */
#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
/* Absolutely no clue what this register is about. */
#define R300_VAP_UNKNOWN_2288 0x2288
# define R300_2288_R300 0x00750000 /* -- nh */
# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
/* gap */
/* Addresses are relative to the vertex program instruction area of the
// memory bank. PROGRAM_END points to the last instruction of the active
// program
//
// The meaning of the two UNKNOWN fields is obviously not known. However,
// experiments so far have shown that both *must* point to an instruction
// inside the vertex program, otherwise the GPU locks up.
// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
// CNTL_1_UNKNOWN to the instruction where the last write to position takes place.
// Most likely this is used to ignore the rest of the program in cases where a group of vertices isn't visible.
// For some reason this "section" sometimes also accepts other instructions that have
// no relationship with position calculations.
*/
#define R300_VAP_PVS_CNTL_1 0x22D0
# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
# define R300_PVS_CNTL_1_POS_END_SHIFT 10
# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
/* Addresses are relative to the vertex program parameters area. */
#define R300_VAP_PVS_CNTL_2 0x22D4
# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
#define R300_VAP_PVS_CNTL_3 0x22D8
# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
// immediate vertices */
#define R300_VAP_VTX_COLOR_R 0x2464
#define R300_VAP_VTX_COLOR_G 0x2468
#define R300_VAP_VTX_COLOR_B 0x246C
#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
#define R300_VAP_VTX_POS_0_Y_1 0x2494
#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
#define R300_VAP_VTX_POS_0_Y_2 0x24A4
#define R300_VAP_VTX_POS_0_Z_2 0x24A8
#define R300_VAP_VTX_END_OF_PKT 0x24AC /* write 0 to indicate end of packet? */
/* gap */
/* These are values from r300_reg/r300_reg.h - they are known to be correct
and are here so we can use one register file instead of several
- Vladimir */
#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
/* each of the following is 3 bits wide and specifies the number
of components */
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
/* UNK30 seems to enable point-to-quad transformation on textures
(or something closely related to that).
This bit is rather fatal at the moment due to deficiencies on the pixel shader side */
#define R300_GB_ENABLE 0x4008
# define R300_GB_POINT_STUFF_ENABLE (1<<0)
# define R300_GB_LINE_STUFF_ENABLE (1<<1)
# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
# define R300_GB_UNK30 (1<<30)
/* each of the following is 2 bits wide */
#define R300_GB_TEX_REPLICATE 0
#define R300_GB_TEX_ST 1
#define R300_GB_TEX_STR 2
# define R300_GB_TEX0_SOURCE_SHIFT 16
# define R300_GB_TEX1_SOURCE_SHIFT 18
# define R300_GB_TEX2_SOURCE_SHIFT 20
# define R300_GB_TEX3_SOURCE_SHIFT 22
# define R300_GB_TEX4_SOURCE_SHIFT 24
# define R300_GB_TEX5_SOURCE_SHIFT 26
# define R300_GB_TEX6_SOURCE_SHIFT 28
# define R300_GB_TEX7_SOURCE_SHIFT 30
/* MSPOS - positions for multisample antialiasing (?) */
#define R300_GB_MSPOS0 0x4010
/* shifts - each of the fields is 4 bits */
# define R300_GB_MSPOS0__MS_X0_SHIFT 0
# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
# define R300_GB_MSPOS0__MS_X1_SHIFT 8
# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
# define R300_GB_MSPOS0__MS_X2_SHIFT 16
# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
# define R300_GB_MSPOS0__MSBD0_Y 24
# define R300_GB_MSPOS0__MSBD0_X 28
#define R300_GB_MSPOS1 0x4014
# define R300_GB_MSPOS1__MS_X3_SHIFT 0
# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
# define R300_GB_MSPOS1__MS_X4_SHIFT 8
# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
# define R300_GB_MSPOS1__MS_X5_SHIFT 16
# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
# define R300_GB_MSPOS1__MSBD1 24
#define R300_GB_TILE_CONFIG 0x4018
# define R300_GB_TILE_ENABLE (1<<0)
# define R300_GB_TILE_PIPE_COUNT_RV300 0
# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
# define R300_GB_TILE_SIZE_8 0
# define R300_GB_TILE_SIZE_16 (1<<4)
# define R300_GB_TILE_SIZE_32 (2<<4)
# define R300_GB_SUPER_SIZE_1 (0<<6)
# define R300_GB_SUPER_SIZE_2 (1<<6)
# define R300_GB_SUPER_SIZE_4 (2<<6)
# define R300_GB_SUPER_SIZE_8 (3<<6)
# define R300_GB_SUPER_SIZE_16 (4<<6)
# define R300_GB_SUPER_SIZE_32 (5<<6)
# define R300_GB_SUPER_SIZE_64 (6<<6)
# define R300_GB_SUPER_SIZE_128 (7<<6)
# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
# define R300_GB_SUPER_TILE_A 0
# define R300_GB_SUPER_TILE_B (1<<15)
# define R300_GB_SUBPIXEL_1_12 0
# define R300_GB_SUBPIXEL_1_16 (1<<16)
#define R300_GB_FIFO_SIZE 0x4024
/* each of the following is 2 bits wide */
#define R300_GB_FIFO_SIZE_32 0
#define R300_GB_FIFO_SIZE_64 1
#define R300_GB_FIFO_SIZE_128 2
#define R300_GB_FIFO_SIZE_256 3
# define R300_SC_IFIFO_SIZE_SHIFT 0
# define R300_SC_TZFIFO_SIZE_SHIFT 2
# define R300_SC_BFIFO_SIZE_SHIFT 4
# define R300_US_OFIFO_SIZE_SHIFT 12
# define R300_US_WFIFO_SIZE_SHIFT 14
/* the following use the same constants as above, but the meaning is
times 2 (i.e. instead of 32 words it means 64) */
# define R300_RS_TFIFO_SIZE_SHIFT 6
# define R300_RS_CFIFO_SIZE_SHIFT 8
# define R300_US_RAM_SIZE_SHIFT 10
/* watermarks, 3 bits wide */
# define R300_RS_HIGHWATER_COL_SHIFT 16
# define R300_RS_HIGHWATER_TEX_SHIFT 19
# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
#define R300_GB_SELECT 0x401C
# define R300_GB_FOG_SELECT_C0A 0
# define R300_GB_FOG_SELECT_C1A 1
# define R300_GB_FOG_SELECT_C2A 2
# define R300_GB_FOG_SELECT_C3A 3
# define R300_GB_FOG_SELECT_1_1_W 4
# define R300_GB_FOG_SELECT_Z 5
# define R300_GB_DEPTH_SELECT_Z 0
# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
# define R300_GB_W_SELECT_1_W 0
# define R300_GB_W_SELECT_1 (1<<4)
#define R300_GB_AA_CONFIG 0x4020
# define R300_AA_ENABLE 0x01
# define R300_AA_SUBSAMPLES_2 0
# define R300_AA_SUBSAMPLES_3 (1<<1)
# define R300_AA_SUBSAMPLES_4 (2<<1)
# define R300_AA_SUBSAMPLES_6 (3<<1)
/* END */
/* gap */
/* The upper enable bits are guessed, based on fglrx reported limits. */
#define R300_TX_ENABLE 0x4104
# define R300_TX_ENABLE_0 (1 << 0)
# define R300_TX_ENABLE_1 (1 << 1)
# define R300_TX_ENABLE_2 (1 << 2)
# define R300_TX_ENABLE_3 (1 << 3)
# define R300_TX_ENABLE_4 (1 << 4)
# define R300_TX_ENABLE_5 (1 << 5)
# define R300_TX_ENABLE_6 (1 << 6)
# define R300_TX_ENABLE_7 (1 << 7)
# define R300_TX_ENABLE_8 (1 << 8)
# define R300_TX_ENABLE_9 (1 << 9)
# define R300_TX_ENABLE_10 (1 << 10)
# define R300_TX_ENABLE_11 (1 << 11)
# define R300_TX_ENABLE_12 (1 << 12)
# define R300_TX_ENABLE_13 (1 << 13)
# define R300_TX_ENABLE_14 (1 << 14)
# define R300_TX_ENABLE_15 (1 << 15)
/* The pointsize is given in multiples of 6. The pointsize can be
// enormous: Clear() renders a single point that fills the entire
// framebuffer. */
#define R300_RE_POINTSIZE 0x421C
# define R300_POINTSIZE_Y_SHIFT 0
# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
# define R300_POINTSIZE_X_SHIFT 16
# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
/* The line width is given in multiples of 6.
In default mode lines are classified as vertical lines.
HO: horizontal
VE: vertical or horizontal
HO & VE: no classification
*/
#define R300_RE_LINE_CNT 0x4234
# define R300_LINESIZE_SHIFT 0
# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
# define R300_LINE_CNT_HO (1 << 16)
# define R300_LINE_CNT_VE (1 << 17)
/* Some sort of scale or clamp value for texcoordless textures. */
#define R300_RE_UNK4238 0x4238
#define R300_RE_SHADE_MODEL 0x4278
# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
# define R300_RE_SHADE_MODEL_FLAT 0x39595
/* Dangerous */
#define R300_RE_POLYGON_MODE 0x4288
# define R300_PM_ENABLED (1 << 0)
# define R300_PM_FRONT_POINT (0 << 0)
# define R300_PM_BACK_POINT (0 << 0)
# define R300_PM_FRONT_LINE (1 << 4)
# define R300_PM_FRONT_FILL (1 << 5)
# define R300_PM_BACK_LINE (1 << 7)
# define R300_PM_BACK_FILL (1 << 8)
/* Not sure why there are duplicates of the factor and constant values.
My best guess so far is that there are separate zbiases for test and write.
Ordering might be wrong.
Some of the tests indicate that fgl has a fallback implementation of zbias
via pixel shaders. */
#define R300_RE_ZBIAS_T_FACTOR 0x42A4
#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
#define R300_RE_ZBIAS_W_FACTOR 0x42AC
#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
/* This register needs to be set to (1<<1) for RV350 to correctly
perform depth test (see --vb-triangles in r300_demo)
Don't know about other chips. - Vladimir
This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT).
One to enable depth test and one for depth write.
Yet this doesn't explain why depth writes work ...
*/
#define R300_RE_OCCLUSION_CNTL 0x42B4
# define R300_OCCLUSION_ON (1<<1)
#define R300_RE_CULL_CNTL 0x42B8
# define R300_CULL_FRONT (1 << 0)
# define R300_CULL_BACK (1 << 1)
# define R300_FRONT_FACE_CCW (0 << 2)
# define R300_FRONT_FACE_CW (1 << 2)
/* BEGIN: Rasterization / Interpolators - many guesses
// 0_UNKNOWN_18 has always been set except for clear operations.
// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
// on the vertex program, *not* the fragment program) */
#define R300_RS_CNTL_0 0x4300
# define R300_RS_CNTL_TC_CNT_SHIFT 2
# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
# define R300_RS_CNTL_CI_CNT_SHIFT 7 /* number of color interpolators used */
# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
#define R300_RS_CNTL_1 0x4304
/* gap */
/* Only used for texture coordinates.
// Use the source field to route texture coordinate input from the vertex program
// to the desired interpolator. Note that the source field is relative to the
// outputs the vertex program *actually* writes. If a vertex program only writes
// texcoord[1], this will be source index 0.
// Set INTERP_USED on all interpolators that produce data used by the
// fragment program. INTERP_USED looks like a swizzling mask, but
// I haven't seen it used that way.
//
// Note: The _UNKNOWN constants are always set in their respective register.
// I don't know if this is necessary. */
#define R300_RS_INTERP_0 0x4310
#define R300_RS_INTERP_1 0x4314
# define R300_RS_INTERP_1_UNKNOWN 0x40
#define R300_RS_INTERP_2 0x4318
# define R300_RS_INTERP_2_UNKNOWN 0x80
#define R300_RS_INTERP_3 0x431C
# define R300_RS_INTERP_3_UNKNOWN 0xC0
#define R300_RS_INTERP_4 0x4320
#define R300_RS_INTERP_5 0x4324
#define R300_RS_INTERP_6 0x4328
#define R300_RS_INTERP_7 0x432C
# define R300_RS_INTERP_SRC_SHIFT 2
# define R300_RS_INTERP_SRC_MASK (7 << 2)
# define R300_RS_INTERP_USED 0x00D10000
/* These DWORDs control how vertex data is routed into fragment program
// registers, after interpolators. */
#define R300_RS_ROUTE_0 0x4330
#define R300_RS_ROUTE_1 0x4334
#define R300_RS_ROUTE_2 0x4338
#define R300_RS_ROUTE_3 0x433C /* GUESS */
#define R300_RS_ROUTE_4 0x4340 /* GUESS */
#define R300_RS_ROUTE_5 0x4344 /* GUESS */
#define R300_RS_ROUTE_6 0x4348 /* GUESS */
#define R300_RS_ROUTE_7 0x434C /* GUESS */
# define R300_RS_ROUTE_SOURCE_INTERP_0 0
# define R300_RS_ROUTE_SOURCE_INTERP_1 1
# define R300_RS_ROUTE_SOURCE_INTERP_2 2
# define R300_RS_ROUTE_SOURCE_INTERP_3 3
# define R300_RS_ROUTE_SOURCE_INTERP_4 4
# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
# define R300_RS_ROUTE_DEST_SHIFT 6
# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
/* Special handling for color: When the fragment program uses color,
// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
// color register index. */
# define R300_RS_ROUTE_0_COLOR (1 << 14)
# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
/* As above, but for secondary color */
# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
/* END */
/* BEGIN: Scissors and cliprects
// There are four clipping rectangles. Their corner coordinates are inclusive.
// Every pixel is assigned a number from 0 and 15 by setting bits 0-3 depending
// on whether the pixel is inside cliprects 0-3, respectively. For example,
// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
// the number 3 (binary 0011).
// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
// the pixel is rasterized.
//
// In addition to this, there is a scissors rectangle. Only pixels inside the
// scissors rectangle are drawn. (coordinates are inclusive)
//
// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
// for the purpose of clipping and scissors. */
#define R300_RE_CLIPRECT_TL_0 0x43B0
#define R300_RE_CLIPRECT_BR_0 0x43B4
#define R300_RE_CLIPRECT_TL_1 0x43B8
#define R300_RE_CLIPRECT_BR_1 0x43BC
#define R300_RE_CLIPRECT_TL_2 0x43C0
#define R300_RE_CLIPRECT_BR_2 0x43C4
#define R300_RE_CLIPRECT_TL_3 0x43C8
#define R300_RE_CLIPRECT_BR_3 0x43CC
# define R300_CLIPRECT_OFFSET 1440
# define R300_CLIPRECT_MASK 0x1FFF
# define R300_CLIPRECT_X_SHIFT 0
# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
# define R300_CLIPRECT_Y_SHIFT 13
# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
#define R300_RE_CLIPRECT_CNTL 0x43D0
# define R300_CLIP_OUT (1 << 0)
# define R300_CLIP_0 (1 << 1)
# define R300_CLIP_1 (1 << 2)
# define R300_CLIP_10 (1 << 3)
# define R300_CLIP_2 (1 << 4)
# define R300_CLIP_20 (1 << 5)
# define R300_CLIP_21 (1 << 6)
# define R300_CLIP_210 (1 << 7)
# define R300_CLIP_3 (1 << 8)
# define R300_CLIP_30 (1 << 9)
# define R300_CLIP_31 (1 << 10)
# define R300_CLIP_310 (1 << 11)
# define R300_CLIP_32 (1 << 12)
# define R300_CLIP_320 (1 << 13)
# define R300_CLIP_321 (1 << 14)
# define R300_CLIP_3210 (1 << 15)
/* gap */
#define R300_RE_SCISSORS_TL 0x43E0
#define R300_RE_SCISSORS_BR 0x43E4
# define R300_SCISSORS_OFFSET 1440
# define R300_SCISSORS_X_SHIFT 0
# define R300_SCISSORS_X_MASK (0x1FFF << 0)
# define R300_SCISSORS_Y_SHIFT 13
# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
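/* Packing sketch, mirroring the cliprect math in r300_emit_cliprects()
 * (r300_cmdbuf.c); x and y are hypothetical window coordinates:
 */
#if 0
u32 scissors_tl = (((y + R300_SCISSORS_OFFSET) & 0x1FFF) << R300_SCISSORS_Y_SHIFT)
		| (((x + R300_SCISSORS_OFFSET) & 0x1FFF) << R300_SCISSORS_X_SHIFT);
#endif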
/* END */
/* BEGIN: Texture specification
// The texture specification dwords are grouped by meaning and not by texture unit.
// This means that e.g. the offset for texture image unit N is found in register
// TX_OFFSET_0 + (4*N) */
#define R300_TX_FILTER_0 0x4400
# define R300_TX_REPEAT 0
# define R300_TX_MIRRORED 1
# define R300_TX_CLAMP 4
# define R300_TX_CLAMP_TO_EDGE 2
# define R300_TX_CLAMP_TO_BORDER 6
# define R300_TX_WRAP_S_SHIFT 0
# define R300_TX_WRAP_S_MASK (7 << 0)
# define R300_TX_WRAP_T_SHIFT 3
# define R300_TX_WRAP_T_MASK (7 << 3)
# define R300_TX_WRAP_Q_SHIFT 6
# define R300_TX_WRAP_Q_MASK (7 << 6)
# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
# define R300_TX_MAG_FILTER_MASK (3 << 9)
# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
/* NOTE: NEAREST doesn't seem to exist.
I'm not setting MAG_FILTER_MASK and (3 << 11) for all
anisotropy modes because that would void the selected mag filter */
# define R300_TX_MIN_FILTER_ANISO_NEAREST ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
# define R300_TX_MIN_FILTER_ANISO_LINEAR ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
# define R300_TX_MAX_ANISO_MASK (14 << 21)
#define R300_TX_UNK1_0 0x4440
# define R300_LOD_BIAS_MASK 0x1fff
#define R300_TX_SIZE_0 0x4480
# define R300_TX_WIDTHMASK_SHIFT 0
# define R300_TX_WIDTHMASK_MASK (2047 << 0)
# define R300_TX_HEIGHTMASK_SHIFT 11
# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
# define R300_TX_UNK23 (1 << 23)
# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
# define R300_TX_SIZE_MASK (15 << 26)
#define R300_TX_FORMAT_0 0x44C0
/* The interpretation of the format word by Wladimir van der Laan */
/* The X, Y, Z and W refer to the layout of the components.
They are given meanings as R, G, B and Alpha by the swizzle
specification */
# define R300_TX_FORMAT_X8 0x0
# define R300_TX_FORMAT_X16 0x1
# define R300_TX_FORMAT_Y4X4 0x2
# define R300_TX_FORMAT_Y8X8 0x3
# define R300_TX_FORMAT_Y16X16 0x4
# define R300_TX_FORMAT_Z3Y3X2 0x5
# define R300_TX_FORMAT_Z5Y6X5 0x6
# define R300_TX_FORMAT_Z6Y5X5 0x7
# define R300_TX_FORMAT_Z11Y11X10 0x8
# define R300_TX_FORMAT_Z10Y11X11 0x9
# define R300_TX_FORMAT_W4Z4Y4X4 0xA
# define R300_TX_FORMAT_W1Z5Y5X5 0xB
# define R300_TX_FORMAT_W8Z8Y8X8 0xC
# define R300_TX_FORMAT_W2Z10Y10X10 0xD
# define R300_TX_FORMAT_W16Z16Y16X16 0xE
# define R300_TX_FORMAT_DXT1 0xF
# define R300_TX_FORMAT_DXT3 0x10
# define R300_TX_FORMAT_DXT5 0x11
# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
/* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
/* gap */
/* Floating point formats */
/* Note - hardware supports both 16 and 32 bit floating point */
# define R300_TX_FORMAT_FL_I16 0x18
# define R300_TX_FORMAT_FL_I16A16 0x19
# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
# define R300_TX_FORMAT_ALPHA_1CH 0x000
# define R300_TX_FORMAT_ALPHA_2CH 0x200
# define R300_TX_FORMAT_ALPHA_4CH 0x600
# define R300_TX_FORMAT_ALPHA_NONE 0xA00
/* Swizzling */
/* constants */
# define R300_TX_FORMAT_X 0
# define R300_TX_FORMAT_Y 1
# define R300_TX_FORMAT_Z 2
# define R300_TX_FORMAT_W 3
# define R300_TX_FORMAT_ZERO 4
# define R300_TX_FORMAT_ONE 5
# define R300_TX_FORMAT_CUT_Z 6 /* 2.0*Z, everything above 1.0 is set to 0.0 */
# define R300_TX_FORMAT_CUT_W 7 /* 2.0*W, everything above 1.0 is set to 0.0 */
# define R300_TX_FORMAT_B_SHIFT 18
# define R300_TX_FORMAT_G_SHIFT 15
# define R300_TX_FORMAT_R_SHIFT 12
# define R300_TX_FORMAT_A_SHIFT 9
/* Convenience macro to take care of layout and swizzling */
# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\
((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
| (R300_TX_FORMAT_##FMT) \
)
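/* e.g. the identity swizzle over the 8888 layout expands mechanically to
 * (0<<18) | (1<<15) | (2<<12) | (3<<9) | 0xC:
 */
#if 0
u32 fmt = R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
#endif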
/* These can be ORed with result of R300_EASY_TX_FORMAT() */
/* We don't really know what they do. Take values from a constant color ? */
# define R300_TX_FORMAT_CONST_X (1<<5)
# define R300_TX_FORMAT_CONST_Y (2<<5)
# define R300_TX_FORMAT_CONST_Z (4<<5)
# define R300_TX_FORMAT_CONST_W (8<<5)
# define R300_TX_FORMAT_YUV_MODE 0x00800000
#define R300_TX_OFFSET_0 0x4540
/* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END */
#define R300_TX_UNK4_0 0x4580
#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
/* END */
/* BEGIN: Fragment program instruction set
// Fragment programs are written directly into register space.
// There are separate instruction streams for texture instructions and ALU
// instructions.
// In order to synchronize these streams, the program is divided into up
// to 4 nodes. Each node begins with a number of TEX operations, followed
// by a number of ALU operations.
// The first node can have zero TEX ops; all subsequent nodes must have at least
// one TEX op.
// All nodes must have at least one ALU op.
//
// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
// 1 node, a value of 3 means 4 nodes.
// The total amount of instructions is defined in PFS_CNTL_2. The offsets are
// offsets into the respective instruction streams, while *_END points to the
// last instruction relative to this offset. */
#define R300_PFS_CNTL_0 0x4600
# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
#define R300_PFS_CNTL_1 0x4604
/* There is an unshifted value here which has so far always been equal to the
// index of the highest used temporary register. */
#define R300_PFS_CNTL_2 0x4608
# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
# define R300_PFS_CNTL_ALU_END_SHIFT 6
# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
# define R300_PFS_CNTL_TEX_END_SHIFT 18
# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
/* gap */
/* Nodes are stored backwards. The last active node is always stored in
// PFS_NODE_3.
// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
// first node is stored in NODE_2, the second node is stored in NODE_3.
//
// Offsets are relative to the master offset from PFS_CNTL_2.
// LAST_NODE is set for the last node, and only for the last node. */
#define R300_PFS_NODE_0 0x4610
#define R300_PFS_NODE_1 0x4614
#define R300_PFS_NODE_2 0x4618
#define R300_PFS_NODE_3 0x461C
# define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
# define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
# define R300_PFS_NODE_ALU_END_SHIFT 6
# define R300_PFS_NODE_ALU_END_MASK (63 << 6)
# define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
# define R300_PFS_NODE_TEX_END_SHIFT 17
# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
# define R300_PFS_NODE_LAST_NODE (1 << 22)
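/* The 2-node example from the comment above, sketched in register terms:
 * PFS_CNTL_0 gets LAST_NODES = 1, NODE_0 = NODE_1 = 0, the first node goes
 * into NODE_2, and the second into NODE_3 with LAST_NODE set. The
 * offset/end variables below are hypothetical:
 */
#if 0
u32 node3 = (alu_off << R300_PFS_NODE_ALU_OFFSET_SHIFT)
	  | (alu_end << R300_PFS_NODE_ALU_END_SHIFT)
	  | (tex_off << R300_PFS_NODE_TEX_OFFSET_SHIFT)
	  | (tex_end << R300_PFS_NODE_TEX_END_SHIFT)
	  | R300_PFS_NODE_LAST_NODE;
#endif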
/* TEX
// As far as I can tell, texture instructions cannot write into output
// registers directly. A subsequent ALU instruction is always necessary,
// even if it's just MAD o0, r0, 1, 0 */
#define R300_PFS_TEXI_0 0x4620
# define R300_FPITX_SRC_SHIFT 0
# define R300_FPITX_SRC_MASK (31 << 0)
# define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */
# define R300_FPITX_DST_SHIFT 6
# define R300_FPITX_DST_MASK (31 << 6)
# define R300_FPITX_IMAGE_SHIFT 11
# define R300_FPITX_IMAGE_MASK (15 << 11) /* GUESS based on layout and native limits */
/* Unsure if these are opcodes, or some kind of bitfield, but this is how
* they were set when I checked
*/
# define R300_FPITX_OPCODE_SHIFT 15
# define R300_FPITX_OP_TEX 1
# define R300_FPITX_OP_TXP 3
# define R300_FPITX_OP_TXB 4
/* ALU
// The ALU instructions register blocks are enumerated according to the order
// in which fglrx emits them. I assume there is space for 64 instructions, since
// each block has space for a maximum of 64 DWORDs, and this matches reported
// native limits.
//
// The basic functional block seems to be one MAD for each color and alpha,
// and an adder that adds all components after the MUL.
// - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
// - DP4: Use OUTC_DP4, OUTA_DP4
// - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
// - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
// - CMP: If ARG2 < 0, return ARG1, else return ARG0
// - FLR: use FRC+MAD
// - XPD: use MAD+MAD
// - SGE, SLT: use MAD+CMP
// - RSQ: use ABS modifier for argument
// - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
// into color register
// - apparently, there's no quick DST operation
// - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
// - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
// - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
//
// Operand selection
// First stage selects three sources from the available registers and
// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
// fglrx sorts the three source fields: Registers before constants,
// lower indices before higher indices; I do not know whether this is necessary.
// fglrx fills unused sources with "read constant 0"
// According to specs, you cannot select more than two different constants.
//
// Second stage selects the operands from the sources. This is defined in
// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
// zero and one.
// Swizzling and negation happens in this stage, as well.
//
// Important: Color and alpha seem to be mostly separate, i.e. their sources
// selection appears to be fully independent (the register storage is probably
// physically split into a color and an alpha section).
// However (because of the apparent physical split), there is some interaction
// WRT swizzling. If, for example, you want to load an R component into an
// Alpha operand, this R component is taken from a *color* source, not from
// an alpha source. The corresponding register doesn't even have to appear in
// the alpha sources list. (I hope this all makes sense to you)
//
// Destination selection
// The destination register index is in FPI1 (color) and FPI3 (alpha) together
// with enable bits.
// There are separate enable bits for writing into temporary registers
// (DSTC_REG_* / DSTA_REG) and program output registers (DSTC_OUTPUT_* / DSTA_OUTPUT).
// You can write to both at once, or not write at all (the same index
// must be used for both).
//
// Note: There is a special form for LRP
// - Argument order is the same as in ARB_fragment_program.
// - Operation is MAD
// - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
// - Set FPI0/FPI2_SPECIAL_LRP
// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD;
// see the encoding sketch after the INSTR register blocks below. */
#define R300_PFS_INSTR1_0 0x46C0
# define R300_FPI1_SRC0C_SHIFT 0
# define R300_FPI1_SRC0C_MASK (31 << 0)
# define R300_FPI1_SRC0C_CONST (1 << 5)
# define R300_FPI1_SRC1C_SHIFT 6
# define R300_FPI1_SRC1C_MASK (31 << 6)
# define R300_FPI1_SRC1C_CONST (1 << 11)
# define R300_FPI1_SRC2C_SHIFT 12
# define R300_FPI1_SRC2C_MASK (31 << 12)
# define R300_FPI1_SRC2C_CONST (1 << 17)
# define R300_FPI1_DSTC_SHIFT 18
# define R300_FPI1_DSTC_MASK (31 << 18)
# define R300_FPI1_DSTC_REG_X (1 << 23)
# define R300_FPI1_DSTC_REG_Y (1 << 24)
# define R300_FPI1_DSTC_REG_Z (1 << 25)
# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
#define R300_PFS_INSTR3_0 0x47C0
# define R300_FPI3_SRC0A_SHIFT 0
# define R300_FPI3_SRC0A_MASK (31 << 0)
# define R300_FPI3_SRC0A_CONST (1 << 5)
# define R300_FPI3_SRC1A_SHIFT 6
# define R300_FPI3_SRC1A_MASK (31 << 6)
# define R300_FPI3_SRC1A_CONST (1 << 11)
# define R300_FPI3_SRC2A_SHIFT 12
# define R300_FPI3_SRC2A_MASK (31 << 12)
# define R300_FPI3_SRC2A_CONST (1 << 17)
# define R300_FPI3_DSTA_SHIFT 18
# define R300_FPI3_DSTA_MASK (31 << 18)
# define R300_FPI3_DSTA_REG (1 << 23)
# define R300_FPI3_DSTA_OUTPUT (1 << 24)
#define R300_PFS_INSTR0_0 0x48C0
# define R300_FPI0_ARGC_SRC0C_XYZ 0
# define R300_FPI0_ARGC_SRC0C_XXX 1
# define R300_FPI0_ARGC_SRC0C_YYY 2
# define R300_FPI0_ARGC_SRC0C_ZZZ 3
# define R300_FPI0_ARGC_SRC1C_XYZ 4
# define R300_FPI0_ARGC_SRC1C_XXX 5
# define R300_FPI0_ARGC_SRC1C_YYY 6
# define R300_FPI0_ARGC_SRC1C_ZZZ 7
# define R300_FPI0_ARGC_SRC2C_XYZ 8
# define R300_FPI0_ARGC_SRC2C_XXX 9
# define R300_FPI0_ARGC_SRC2C_YYY 10
# define R300_FPI0_ARGC_SRC2C_ZZZ 11
# define R300_FPI0_ARGC_SRC0A 12
# define R300_FPI0_ARGC_SRC1A 13
# define R300_FPI0_ARGC_SRC2A 14
# define R300_FPI0_ARGC_SRC1C_LRP 15
# define R300_FPI0_ARGC_ZERO 20
# define R300_FPI0_ARGC_ONE 21
# define R300_FPI0_ARGC_HALF 22 /* GUESS */
# define R300_FPI0_ARGC_SRC0C_YZX 23
# define R300_FPI0_ARGC_SRC1C_YZX 24
# define R300_FPI0_ARGC_SRC2C_YZX 25
# define R300_FPI0_ARGC_SRC0C_ZXY 26
# define R300_FPI0_ARGC_SRC1C_ZXY 27
# define R300_FPI0_ARGC_SRC2C_ZXY 28
# define R300_FPI0_ARGC_SRC0CA_WZY 29
# define R300_FPI0_ARGC_SRC1CA_WZY 30
# define R300_FPI0_ARGC_SRC2CA_WZY 31
# define R300_FPI0_ARG0C_SHIFT 0
# define R300_FPI0_ARG0C_MASK (31 << 0)
# define R300_FPI0_ARG0C_NEG (1 << 5)
# define R300_FPI0_ARG0C_ABS (1 << 6)
# define R300_FPI0_ARG1C_SHIFT 7
# define R300_FPI0_ARG1C_MASK (31 << 7)
# define R300_FPI0_ARG1C_NEG (1 << 12)
# define R300_FPI0_ARG1C_ABS (1 << 13)
# define R300_FPI0_ARG2C_SHIFT 14
# define R300_FPI0_ARG2C_MASK (31 << 14)
# define R300_FPI0_ARG2C_NEG (1 << 19)
# define R300_FPI0_ARG2C_ABS (1 << 20)
# define R300_FPI0_SPECIAL_LRP (1 << 21)
# define R300_FPI0_OUTC_MAD (0 << 23)
# define R300_FPI0_OUTC_DP3 (1 << 23)
# define R300_FPI0_OUTC_DP4 (2 << 23)
# define R300_FPI0_OUTC_MIN (4 << 23)
# define R300_FPI0_OUTC_MAX (5 << 23)
# define R300_FPI0_OUTC_CMP (8 << 23)
# define R300_FPI0_OUTC_FRC (9 << 23)
# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
# define R300_FPI0_OUTC_SAT (1 << 30)
# define R300_FPI0_UNKNOWN_31 (1 << 31)
#define R300_PFS_INSTR2_0 0x49C0
# define R300_FPI2_ARGA_SRC0C_X 0
# define R300_FPI2_ARGA_SRC0C_Y 1
# define R300_FPI2_ARGA_SRC0C_Z 2
# define R300_FPI2_ARGA_SRC1C_X 3
# define R300_FPI2_ARGA_SRC1C_Y 4
# define R300_FPI2_ARGA_SRC1C_Z 5
# define R300_FPI2_ARGA_SRC2C_X 6
# define R300_FPI2_ARGA_SRC2C_Y 7
# define R300_FPI2_ARGA_SRC2C_Z 8
# define R300_FPI2_ARGA_SRC0A 9
# define R300_FPI2_ARGA_SRC1A 10
# define R300_FPI2_ARGA_SRC2A 11
# define R300_FPI2_ARGA_SRC1A_LRP 15
# define R300_FPI2_ARGA_ZERO 16
# define R300_FPI2_ARGA_ONE 17
# define R300_FPI2_ARGA_HALF 18 /* GUESS */
# define R300_FPI2_ARG0A_SHIFT 0
# define R300_FPI2_ARG0A_MASK (31 << 0)
# define R300_FPI2_ARG0A_NEG (1 << 5)
# define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */
# define R300_FPI2_ARG1A_SHIFT 7
# define R300_FPI2_ARG1A_MASK (31 << 7)
# define R300_FPI2_ARG1A_NEG (1 << 12)
# define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */
# define R300_FPI2_ARG2A_SHIFT 14
# define R300_FPI2_ARG2A_MASK (31 << 14)
# define R300_FPI2_ARG2A_NEG (1 << 19)
# define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */
# define R300_FPI2_SPECIAL_LRP (1 << 21)
# define R300_FPI2_OUTA_MAD (0 << 23)
# define R300_FPI2_OUTA_DP4 (1 << 23)
# define R300_FPI2_OUTA_MIN (2 << 23)
# define R300_FPI2_OUTA_MAX (3 << 23)
# define R300_FPI2_OUTA_CMP (6 << 23)
# define R300_FPI2_OUTA_FRC (7 << 23)
# define R300_FPI2_OUTA_EX2 (8 << 23)
# define R300_FPI2_OUTA_LG2 (9 << 23)
# define R300_FPI2_OUTA_RCP (10 << 23)
# define R300_FPI2_OUTA_RSQ (11 << 23)
# define R300_FPI2_OUTA_SAT (1 << 30)
# define R300_FPI2_UNKNOWN_31 (1 << 31)
/* END */
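/* A minimal illustrative sketch (not part of the original header), based on
 * the operand/destination description above: the INSTR1/INSTR0 pair for the
 * color half of "MAD tmp2, tmp0, tmp1, tmp2".  The alpha half would be built
 * analogously in INSTR3/INSTR2; register indices are example values. */
static inline void r300_example_color_mad(unsigned int *instr1,
		unsigned int *instr0)
{
	/* INSTR1: sources tmp0/tmp1/tmp2, destination tmp2.xyz */
	*instr1 = (0 << R300_FPI1_SRC0C_SHIFT) |
		(1 << R300_FPI1_SRC1C_SHIFT) |
		(2 << R300_FPI1_SRC2C_SHIFT) |
		(2 << R300_FPI1_DSTC_SHIFT) |
		R300_FPI1_DSTC_REG_X | R300_FPI1_DSTC_REG_Y |
		R300_FPI1_DSTC_REG_Z;
	/* INSTR0: pass each source through unswizzled, combine with MAD */
	*instr0 = (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) |
		(R300_FPI0_ARGC_SRC1C_XYZ << R300_FPI0_ARG1C_SHIFT) |
		(R300_FPI0_ARGC_SRC2C_XYZ << R300_FPI0_ARG2C_SHIFT) |
		R300_FPI0_OUTC_MAD;
}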
/* gap */
#define R300_PP_ALPHA_TEST 0x4BD4
# define R300_REF_ALPHA_MASK 0x000000ff
# define R300_ALPHA_TEST_FAIL (0 << 8)
# define R300_ALPHA_TEST_LESS (1 << 8)
# define R300_ALPHA_TEST_LEQUAL (3 << 8)
# define R300_ALPHA_TEST_EQUAL (2 << 8)
# define R300_ALPHA_TEST_GEQUAL (6 << 8)
# define R300_ALPHA_TEST_GREATER (4 << 8)
# define R300_ALPHA_TEST_NEQUAL (5 << 8)
# define R300_ALPHA_TEST_PASS (7 << 8)
# define R300_ALPHA_TEST_OP_MASK (7 << 8)
# define R300_ALPHA_TEST_ENABLE (1 << 11)
/* gap */
/* Fragment program parameters in 7.16 floating point */
#define R300_PFS_PARAM_0_X 0x4C00
#define R300_PFS_PARAM_0_Y 0x4C04
#define R300_PFS_PARAM_0_Z 0x4C08
#define R300_PFS_PARAM_0_W 0x4C0C
/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
#define R300_PFS_PARAM_31_X 0x4DF0
#define R300_PFS_PARAM_31_Y 0x4DF4
#define R300_PFS_PARAM_31_Z 0x4DF8
#define R300_PFS_PARAM_31_W 0x4DFC
/* Notes:
// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
// function (both registers are always set up completely in any case)
// - Most blend flags are simply copied from R200 and not tested yet */
#define R300_RB3D_CBLEND 0x4E04
#define R300_RB3D_ABLEND 0x4E08
/* the following only appear in CBLEND */
# define R300_BLEND_ENABLE (1 << 0)
# define R300_BLEND_UNKNOWN (3 << 1)
# define R300_BLEND_NO_SEPARATE (1 << 3)
/* the following are shared between CBLEND and ABLEND */
# define R300_FCN_MASK (3 << 12)
# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
# define R300_SRC_BLEND_GL_ZERO (32 << 16)
# define R300_SRC_BLEND_GL_ONE (33 << 16)
# define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16)
# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
# define R300_SRC_BLEND_GL_DST_COLOR (36 << 16)
# define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
# define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
# define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16)
# define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
# define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
# define R300_SRC_BLEND_MASK (63 << 16)
# define R300_DST_BLEND_GL_ZERO (32 << 24)
# define R300_DST_BLEND_GL_ONE (33 << 24)
# define R300_DST_BLEND_GL_SRC_COLOR (34 << 24)
# define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
# define R300_DST_BLEND_GL_DST_COLOR (36 << 24)
# define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
# define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24)
# define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
# define R300_DST_BLEND_GL_DST_ALPHA (40 << 24)
# define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
# define R300_DST_BLEND_MASK (63 << 24)
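/* A minimal illustrative sketch (not part of the original header): a CBLEND
 * value for classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blending, built
 * from the flags above.  The R300_EXAMPLE_* name is ours, and whether
 * BLEND_UNKNOWN is also required is exactly the open question noted above. */
#define R300_EXAMPLE_CBLEND_ALPHA					\
	(R300_BLEND_ENABLE | R300_BLEND_NO_SEPARATE |			\
	 R300_COMB_FCN_ADD_CLAMP |					\
	 R300_SRC_BLEND_GL_SRC_ALPHA |					\
	 R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA)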
#define R300_RB3D_COLORMASK 0x4E0C
# define R300_COLORMASK0_B (1<<0)
# define R300_COLORMASK0_G (1<<1)
# define R300_COLORMASK0_R (1<<2)
# define R300_COLORMASK0_A (1<<3)
/* gap */
#define R300_RB3D_COLOROFFSET0 0x4E28
# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
/* gap */
/* Bit 16: Larger tiles
// Bit 17: 4x2 tiles
// Bit 18: Extremely weird tile like, but some pixels duplicated? */
#define R300_RB3D_COLORPITCH0 0x4E38
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
# define R300_COLOR_FORMAT_RGB565 (2 << 22)
# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
/* gap */
/* Guess by Vladimir.
// Set to 0A before 3D operations, set to 02 afterwards. */
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
# define R300_RB3D_DSTCACHE_02 0x00000002
# define R300_RB3D_DSTCACHE_0A 0x0000000A
/* gap */
/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
/* Bit (1<<8) is the "test" bit, so plain write is 6. - vd */
#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
# define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */
# define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_STENCIL_ENABLE 0x00000001
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
/* functions */
# define R300_ZS_NEVER 0
# define R300_ZS_LESS 1
# define R300_ZS_LEQUAL 2
# define R300_ZS_EQUAL 3
# define R300_ZS_GEQUAL 4
# define R300_ZS_GREATER 5
# define R300_ZS_NOTEQUAL 6
# define R300_ZS_ALWAYS 7
# define R300_ZS_MASK 7
/* operations */
# define R300_ZS_KEEP 0
# define R300_ZS_ZERO 1
# define R300_ZS_REPLACE 2
# define R300_ZS_INCR 3
# define R300_ZS_DECR 4
# define R300_ZS_INVERT 5
# define R300_ZS_INCR_WRAP 6
# define R300_ZS_DECR_WRAP 7
/* front and back refer to operations done for front
and back faces, i.e. separate stencil function support */
# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
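/* A minimal illustrative sketch (not part of the original header): a
 * ZSTENCIL_CNTL_1 value for a LEQUAL depth test with pass-through (KEEP)
 * stencil ops on both faces, packed with the shifts above.  The
 * R300_EXAMPLE_* name is ours. */
#define R300_EXAMPLE_ZS1_LEQUAL_KEEP					\
	((R300_ZS_LEQUAL << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT) |		\
	 (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT) |		\
	 (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT) |		\
	 (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT) |	\
	 (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT) |	\
	 (R300_ZS_ALWAYS << R300_RB3D_ZS1_BACK_FUNC_SHIFT) |		\
	 (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT) |		\
	 (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT) |		\
	 (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT))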
#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
# define R300_RB3D_ZS2_STENCIL_MASK 0xFF
# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
/* gap */
#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
/* gap */
#define R300_RB3D_DEPTHOFFSET 0x4F20
#define R300_RB3D_DEPTHPITCH 0x4F24
# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
/* BEGIN: Vertex program instruction set
// Every instruction is four dwords long:
// DWORD 0: output and opcode
// DWORD 1: first argument
// DWORD 2: second argument
// DWORD 3: third argument
//
// Notes:
// - ABS r, a is implemented as MAX r, a, -a
// - MOV is implemented as ADD to zero
// - XPD is implemented as MUL + MAD
// - FLR is implemented as FRC + ADD
// - apparently, fglrx tries to schedule instructions so that there is at least
// one instruction between the write to a temporary and the first read
// from said temporary; however, violations of this scheduling are allowed
// - register indices seem to be unrelated with OpenGL aliasing to conventional state
// - only one attribute and one parameter can be loaded at a time; however, the
// same attribute/parameter can be used for more than one argument
// - the second software argument for POW is the third hardware argument (no idea why)
// - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
//
// There is some magic surrounding LIT:
// The single argument is replicated across all three inputs, but swizzled:
// First argument: xyzy
// Second argument: xyzx
// Third argument: xyzw
// Whenever the result is used later in the vertex program, fglrx forces x and w
// to be 1.0 in the input selection; I don't know whether this is strictly necessary */
#define R300_VPI_OUT_OP_DOT (1 << 0)
#define R300_VPI_OUT_OP_MUL (2 << 0)
#define R300_VPI_OUT_OP_ADD (3 << 0)
#define R300_VPI_OUT_OP_MAD (4 << 0)
#define R300_VPI_OUT_OP_DST (5 << 0)
#define R300_VPI_OUT_OP_FRC (6 << 0)
#define R300_VPI_OUT_OP_MAX (7 << 0)
#define R300_VPI_OUT_OP_MIN (8 << 0)
#define R300_VPI_OUT_OP_SGE (9 << 0)
#define R300_VPI_OUT_OP_SLT (10 << 0)
#define R300_VPI_OUT_OP_UNK12 (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
#define R300_VPI_OUT_OP_EXP (65 << 0)
#define R300_VPI_OUT_OP_LOG (66 << 0)
#define R300_VPI_OUT_OP_UNK67 (67 << 0) /* Used in fog computations, scalar(scalar) */
#define R300_VPI_OUT_OP_LIT (68 << 0)
#define R300_VPI_OUT_OP_POW (69 << 0)
#define R300_VPI_OUT_OP_RCP (70 << 0)
#define R300_VPI_OUT_OP_RSQ (72 << 0)
#define R300_VPI_OUT_OP_UNK73 (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
#define R300_VPI_OUT_OP_EX2 (75 << 0)
#define R300_VPI_OUT_OP_LG2 (76 << 0)
#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
#define R300_VPI_OUT_OP_UNK129 (129 << 0) /* all temps, vector(scalar, vector, vector) */
#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
#define R300_VPI_OUT_REG_INDEX_SHIFT 13
#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) /* GUESS based on fglrx native limits */
#define R300_VPI_OUT_WRITE_X (1 << 20)
#define R300_VPI_OUT_WRITE_Y (1 << 21)
#define R300_VPI_OUT_WRITE_Z (1 << 22)
#define R300_VPI_OUT_WRITE_W (1 << 23)
#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */
#define R300_VPI_IN_REG_INDEX_SHIFT 5
#define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* GUESS based on fglrx native limits */
/* The R300 can select components from the input register arbitrarily.
// Use the following constants, shifted by the component shift you
// want to select */
#define R300_VPI_IN_SELECT_X 0
#define R300_VPI_IN_SELECT_Y 1
#define R300_VPI_IN_SELECT_Z 2
#define R300_VPI_IN_SELECT_W 3
#define R300_VPI_IN_SELECT_ZERO 4
#define R300_VPI_IN_SELECT_ONE 5
#define R300_VPI_IN_SELECT_MASK 7
#define R300_VPI_IN_X_SHIFT 13
#define R300_VPI_IN_Y_SHIFT 16
#define R300_VPI_IN_Z_SHIFT 19
#define R300_VPI_IN_W_SHIFT 22
#define R300_VPI_IN_NEG_X (1 << 25)
#define R300_VPI_IN_NEG_Y (1 << 26)
#define R300_VPI_IN_NEG_Z (1 << 27)
#define R300_VPI_IN_NEG_W (1 << 28)
/* END */
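/* A minimal illustrative sketch (not part of the original header): the four
 * dwords of "MOV temp1, temp0", encoded as ADD-to-zero as the notes above
 * describe.  Register indices, and the use of REG_CLASS_NONE plus a ZERO
 * swizzle for the unused arguments, are our assumptions. */
static inline void r300_example_vpi_mov(unsigned int vpi[4])
{
	unsigned int xyzw =
		(R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) |
		(R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) |
		(R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) |
		(R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
	unsigned int zero =
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT) |
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT) |
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT) |
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT);

	vpi[0] = R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY |
		(1 << R300_VPI_OUT_REG_INDEX_SHIFT) |
		R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y |
		R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
	vpi[1] = R300_VPI_IN_REG_CLASS_TEMPORARY |
		(0 << R300_VPI_IN_REG_INDEX_SHIFT) | xyzw; /* temp0.xyzw */
	vpi[2] = R300_VPI_IN_REG_CLASS_NONE | zero;	/* constant 0 */
	vpi[3] = R300_VPI_IN_REG_CLASS_NONE | zero;	/* unused */
}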
/* BEGIN: Packet 3 commands
// A primitive emission dword. */
#define R300_PRIM_TYPE_NONE (0 << 0)
#define R300_PRIM_TYPE_POINT (1 << 0)
#define R300_PRIM_TYPE_LINE (2 << 0)
#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) // GUESS (based on r200)
#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
#define R300_PRIM_TYPE_QUADS (13 << 0)
#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
#define R300_PRIM_TYPE_POLYGON (15 << 0)
#define R300_PRIM_TYPE_MASK 0xF
#define R300_PRIM_WALK_IND (1 << 4)
#define R300_PRIM_WALK_LIST (2 << 4)
#define R300_PRIM_WALK_RING (3 << 4)
#define R300_PRIM_WALK_MASK (3 << 4)
#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) // GUESS (based on r200)
#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) // GUESS
#define R300_PRIM_NUM_VERTICES_SHIFT 16
// Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
// Two parameter dwords:
// 0. The first parameter appears to be always 0
// 1. The second parameter is a standard primitive emission dword.
#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
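/* A minimal illustrative sketch (not part of the original header): the two
 * parameter dwords of a 3D_DRAW_VBUF packet for a 6-vertex triangle list
 * walked from the loaded vertex arrays.  The R300_EXAMPLE_* names are ours. */
#define R300_EXAMPLE_DRAW_VBUF_DW0 0
#define R300_EXAMPLE_DRAW_VBUF_DW1					\
	(R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_LIST |		\
	 (6 << R300_PRIM_NUM_VERTICES_SHIFT))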
// Specify the full set of vertex arrays as (address, stride).
// The first parameter is the number of vertex arrays specified.
// The rest of the command is a variable length list of blocks, where
// each block is three dwords long and specifies two arrays.
// The first dword of a block is split into two words, the lower significant
// word refers to the first array, the more significant word to the second
// array in the block.
// The low byte of each word contains the size of an array entry in dwords,
// the high byte contains the stride of the array.
// The second dword of a block contains the pointer to the first array,
// the third dword of a block contains the pointer to the second array.
// Note that if the total number of arrays is odd, the third dword of
// the last block is omitted.
#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
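/* A minimal illustrative sketch (not part of the original header): packing
 * the first dword of a 3D_LOAD_VBPNTR block for two arrays, following the
 * word/byte layout described above (entry sizes in dwords in the low bytes,
 * strides in the high bytes, one word per array). */
static inline unsigned int r300_example_vbpntr_header(unsigned int size0,
		unsigned int stride0, unsigned int size1, unsigned int stride1)
{
	return (size0 & 0xff) | ((stride0 & 0xff) << 8) |
	       ((size1 & 0xff) << 16) | ((stride1 & 0xff) << 24);
}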
#define R300_PACKET3_INDX_BUFFER 0x00003300
# define R300_EB_UNK1_SHIFT 24
# define R300_EB_UNK1 (0x80<<24)
# define R300_EB_UNK2 0x0810
#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
/* END */
#endif /* _R300_REG_H */
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include "drm.h" #include "drm.h"
#include "radeon_drm.h" #include "radeon_drm.h"
#include "radeon_drv.h" #include "radeon_drv.h"
#include "r300_reg.h"
#define RADEON_FIFO_DEBUG 0 #define RADEON_FIFO_DEBUG 0
...@@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, ...@@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
#if __OS_HAS_AGP #if __OS_HAS_AGP
if ( !dev_priv->is_pci ) { if ( !dev_priv->is_pci ) {
/* set RADEON_AGP_BASE here instead of relying on X from user space */
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
dev_priv->ring_rptr->offset dev_priv->ring_rptr->offset
- dev->agp->base - dev->agp->base
...@@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) ...@@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
radeon_do_cleanup_cp(dev); radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if(!dev->agp_buffer_map) { if(!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n"); DRM_ERROR("could not find dma buffer region!\n");
...@@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS ) ...@@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) ); DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
if(init.func == RADEON_INIT_R300_CP)
r300_init_reg_flags();
switch ( init.func ) { switch ( init.func ) {
case RADEON_INIT_CP: case RADEON_INIT_CP:
case RADEON_INIT_R200_CP: case RADEON_INIT_R200_CP:
...@@ -2039,13 +2046,41 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags) ...@@ -2039,13 +2046,41 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
case CHIP_RV200: case CHIP_RV200:
case CHIP_R200: case CHIP_R200:
case CHIP_R300: case CHIP_R300:
case CHIP_R420:
dev_priv->flags |= CHIP_HAS_HIERZ; dev_priv->flags |= CHIP_HAS_HIERZ;
break; break;
default: default:
/* all other chips have no hierarchical z buffer */ /* all other chips have no hierarchical z buffer */
break; break;
} }
if (drm_device_is_agp(dev))
dev_priv->flags |= CHIP_IS_AGP;
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
return ret;
}
int radeon_presetup(struct drm_device *dev)
{
int ret;
drm_local_map_t *map;
drm_radeon_private_t *dev_priv = dev->dev_private;
ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
drm_get_resource_len(dev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY, &dev_priv->mmio);
if (ret != 0)
return ret; return ret;
ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
_DRM_WRITE_COMBINING, &map);
if (ret != 0)
return ret;
return 0;
} }
int radeon_driver_postcleanup(struct drm_device *dev) int radeon_driver_postcleanup(struct drm_device *dev)
......
...@@ -195,6 +195,52 @@ typedef union { ...@@ -195,6 +195,52 @@ typedef union {
#define RADEON_WAIT_2D 0x1 #define RADEON_WAIT_2D 0x1
#define RADEON_WAIT_3D 0x2 #define RADEON_WAIT_3D 0x2
/* Allowed parameters for R300_CMD_PACKET3
*/
#define R300_CMD_PACKET3_CLEAR 0
#define R300_CMD_PACKET3_RAW 1
/* Commands understood by cmd_buffer ioctl for R300.
* The interface has not been stabilized, so some of these may be removed
* and eventually reordered before stabilization.
*/
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
typedef union {
unsigned int u;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, count, reglo, reghi;
} packet0;
struct {
unsigned char cmd_type, count, adrlo, adrhi;
} vpu;
struct {
unsigned char cmd_type, packet, pad0, pad1;
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
} drm_r300_cmd_header_t;
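/* A minimal illustrative sketch (not part of the patch): building a PACKET0
 * header with the union above.  That count is the number of register dwords
 * that follow, and that reglo/reghi carry the low/high byte of the register
 * offset, are assumptions read off the field names. */
static inline drm_r300_cmd_header_t r300_example_packet0_header(
		unsigned int reg, unsigned int count)
{
	drm_r300_cmd_header_t h;
	h.u = 0;
	h.packet0.cmd_type = R300_CMD_PACKET0;
	h.packet0.count = count;
	h.packet0.reglo = reg & 0xff;
	h.packet0.reghi = (reg >> 8) & 0xff;
	return h;
}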
#define RADEON_FRONT 0x1 #define RADEON_FRONT 0x1
#define RADEON_BACK 0x2 #define RADEON_BACK 0x2
......
...@@ -76,6 +76,7 @@ static struct drm_driver driver = { ...@@ -76,6 +76,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t), .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.preinit = radeon_driver_preinit, .preinit = radeon_driver_preinit,
.presetup = radeon_presetup,
.postcleanup = radeon_driver_postcleanup, .postcleanup = radeon_driver_postcleanup,
.prerelease = radeon_driver_prerelease, .prerelease = radeon_driver_prerelease,
.pretakedown = radeon_driver_pretakedown, .pretakedown = radeon_driver_pretakedown,
......
...@@ -82,9 +82,10 @@ ...@@ -82,9 +82,10 @@
* - Add support for r100 cube maps * - Add support for r100 cube maps
* 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
* texture filtering on r200 * texture filtering on r200
* 1.17- Add initial support for R300 (3D).
*/ */
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 16 #define DRIVER_MINOR 17
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) #define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
...@@ -106,7 +107,9 @@ enum radeon_family { ...@@ -106,7 +107,9 @@ enum radeon_family {
CHIP_RV280, CHIP_RV280,
CHIP_R300, CHIP_R300,
CHIP_RS300, CHIP_RS300,
CHIP_R350,
CHIP_RV350, CHIP_RV350,
CHIP_R420,
CHIP_LAST, CHIP_LAST,
}; };
...@@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ); ...@@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
extern int radeon_presetup(struct drm_device *dev);
extern int radeon_driver_postcleanup(struct drm_device *dev); extern int radeon_driver_postcleanup(struct drm_device *dev);
extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
...@@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev ); ...@@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev );
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg); unsigned long arg);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_cmd_buffer_t* cmdbuf);
/* Flags for stats.boxes /* Flags for stats.boxes
*/ */
#define RADEON_BOX_DMA_IDLE 0x1 #define RADEON_BOX_DMA_IDLE 0x1
...@@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, ...@@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
#define RADEON_CRTC2_OFFSET 0x0324 #define RADEON_CRTC2_OFFSET 0x0324
#define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_CRTC2_OFFSET_CNTL 0x0328
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
#define RADEON_AGP_BASE 0x0170
#define RADEON_RB3D_COLOROFFSET 0x1c40 #define RADEON_RB3D_COLOROFFSET 0x1c40
#define RADEON_RB3D_COLORPITCH 0x1c48 #define RADEON_RB3D_COLORPITCH 0x1c48
...@@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, ...@@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
#define RADEON_CP_PACKET1 0x40000000 #define RADEON_CP_PACKET1 0x40000000
#define RADEON_CP_PACKET2 0x80000000 #define RADEON_CP_PACKET2 0x80000000
#define RADEON_CP_PACKET3 0xC0000000 #define RADEON_CP_PACKET3 0xC0000000
# define RADEON_CP_NOP 0x00001000
# define RADEON_CP_NEXT_CHAR 0x00001900
# define RADEON_CP_PLY_NEXTSCAN 0x00001D00
# define RADEON_CP_SET_SCISSORS 0x00001E00
/* GEN_INDX_PRIM is unsupported starting with R300 */
# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
# define RADEON_WAIT_FOR_IDLE 0x00002600 # define RADEON_WAIT_FOR_IDLE 0x00002600
# define RADEON_3D_DRAW_VBUF 0x00002800 # define RADEON_3D_DRAW_VBUF 0x00002800
# define RADEON_3D_DRAW_IMMD 0x00002900 # define RADEON_3D_DRAW_IMMD 0x00002900
# define RADEON_3D_DRAW_INDX 0x00002A00 # define RADEON_3D_DRAW_INDX 0x00002A00
# define RADEON_CP_LOAD_PALETTE 0x00002C00
# define RADEON_3D_LOAD_VBPNTR 0x00002F00 # define RADEON_3D_LOAD_VBPNTR 0x00002F00
# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 # define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 # define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
# define RADEON_3D_CLEAR_ZMASK 0x00003200 # define RADEON_3D_CLEAR_ZMASK 0x00003200
# define RADEON_CP_INDX_BUFFER 0x00003300
# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
# define RADEON_CP_3D_DRAW_INDX_2 0x00003600
# define RADEON_3D_CLEAR_HIZ 0x00003700 # define RADEON_3D_CLEAR_HIZ 0x00003700
# define RADEON_CP_3D_CLEAR_CMASK 0x00003802
# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 # define RADEON_CNTL_HOSTDATA_BLT 0x00009400
# define RADEON_CNTL_PAINT_MULTI 0x00009A00 # define RADEON_CNTL_PAINT_MULTI 0x00009A00
# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 # define RADEON_CNTL_BITBLT_MULTI 0x00009B00
......
...@@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev, ...@@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
} }
#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
static int radeon_cp_dispatch_texture( DRMFILE filp, static int radeon_cp_dispatch_texture( DRMFILE filp,
drm_device_t *dev, drm_device_t *dev,
...@@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, ...@@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
u32 format; u32 format;
u32 *buffer; u32 *buffer;
const u8 __user *data; const u8 __user *data;
int size, dwords, tex_width, blit_width; int size, dwords, tex_width, blit_width, spitch;
u32 height; u32 height;
int i; int i;
u32 texpitch, microtile; u32 texpitch, microtile;
u32 offset;
RING_LOCALS; RING_LOCALS;
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp ); DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
...@@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, ...@@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
RADEON_WAIT_UNTIL_IDLE(); RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING(); ADVANCE_RING();
#ifdef __BIG_ENDIAN
/* The Mesa texture functions provide the data in little endian as the
* chip wants it, but we need to compensate for the fact that the CP
* ring gets byte-swapped
*/
BEGIN_RING( 2 );
OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
ADVANCE_RING();
#endif
/* The compiler won't optimize away a division by a variable, /* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll * even if the only legal values are powers of two. Thus, we'll
* use a shift instead. * use a shift instead.
...@@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, ...@@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
DRM_ERROR( "invalid texture format %d\n", tex->format ); DRM_ERROR( "invalid texture format %d\n", tex->format );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
spitch = blit_width >> 6;
if (spitch == 0 && image->height > 1)
return DRM_ERR(EINVAL);
texpitch = tex->pitch; texpitch = tex->pitch;
if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
microtile = 1; microtile = 1;
...@@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, ...@@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
*/ */
buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset); buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
dwords = size / 4; dwords = size / 4;
buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |
(format << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_HOST_DATA |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
buffer[2] = (texpitch << 22) | (tex->offset >> 10);
buffer[3] = 0xffffffff;
buffer[4] = 0xffffffff;
buffer[5] = (image->y << 16) | image->x;
buffer[6] = (height << 16) | image->width;
buffer[7] = dwords;
buffer += 8;
if (microtile) { if (microtile) {
/* texture micro tiling in use, minimum texture width is thus 16 bytes. /* texture micro tiling in use, minimum texture width is thus 16 bytes.
...@@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, ...@@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
} }
buf->filp = filp; buf->filp = filp;
buf->used = (dwords + 8) * sizeof(u32); buf->used = size;
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); offset = dev_priv->gart_buffers_offset + buf->offset;
radeon_cp_discard_buffer( dev, buf ); BEGIN_RING(9);
OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |
(format << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS );
OUT_RING((spitch << 22) | (offset >> 10));
OUT_RING((texpitch << 22) | (tex->offset >> 10));
OUT_RING(0);
OUT_RING((image->x << 16) | image->y);
OUT_RING((image->width << 16) | height);
RADEON_WAIT_UNTIL_2D_IDLE();
ADVANCE_RING();
radeon_cp_discard_buffer(dev, buf);
/* Update the input parameters for next time */ /* Update the input parameters for next time */
image->y += height; image->y += height;
...@@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS ) ...@@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
orig_nbox = cmdbuf.nbox; orig_nbox = cmdbuf.nbox;
if(dev_priv->microcode_version == UCODE_R300) {
int temp;
temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return temp;
}
/* microcode_version != r300 */
while ( cmdbuf.bufsz >= sizeof(header) ) { while ( cmdbuf.bufsz >= sizeof(header) ) {
header.i = *(int *)cmdbuf.buf; header.i = *(int *)cmdbuf.buf;
......
/* savage_bci.c -- BCI support for Savage
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
/* Need a long timeout, because shadow status updates can take a while
* and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
#define SAVAGE_FREELIST_DEBUG 0
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
{
uint32_t mask = dev_priv->status_used_mask;
uint32_t threshold = dev_priv->bci_threshold_hi;
uint32_t status;
int i;
#if SAVAGE_BCI_DEBUG
if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
DRM_ERROR("Trying to emit %d words "
"(more than guaranteed space in COB)\n", n);
#endif
for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
DRM_MEMORYBARRIER();
status = dev_priv->status_ptr[0];
if ((status & mask) < threshold)
return 0;
DRM_UDELAY(1);
}
#if SAVAGE_BCI_DEBUG
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
return DRM_ERR(EBUSY);
}
static int
savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
{
uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
uint32_t status;
int i;
for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
return 0;
DRM_UDELAY(1);
}
#if SAVAGE_BCI_DEBUG
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
}
static int
savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
{
uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
uint32_t status;
int i;
for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
return 0;
DRM_UDELAY(1);
}
#if SAVAGE_BCI_DEBUG
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
}
/*
* Waiting for events.
*
* The BIOS resets the event tag to 0 on mode changes. Therefore we
* never emit 0 to the event tag. If we find a 0 event tag we know the
* BIOS stomped on it and return success assuming that the BIOS waited
* for engine idle.
*
* Note: if the Xserver uses the event tag it has to follow the same
* rule. Otherwise there may be glitches every 2^16 events.
*/
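/* A minimal illustrative sketch (not part of the original driver) of the
 * wrap-safe tag comparison used by the wait functions below: a 16-bit
 * difference of at most 0x7fff means the status tag has reached or passed e,
 * even across a 16-bit wrap, and a zero tag means the BIOS reset it. */
static inline int savage_example_event_reached(uint32_t status, uint16_t e)
{
	uint16_t tag = status & 0xffff;

	return tag == 0 || (uint16_t)(tag - e) <= 0x7fff;
}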
static int
savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
{
uint32_t status;
int i;
for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
DRM_MEMORYBARRIER();
status = dev_priv->status_ptr[1];
if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
(status & 0xffff) == 0)
return 0;
DRM_UDELAY(1);
}
#if SAVAGE_BCI_DEBUG
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif
return DRM_ERR(EBUSY);
}
static int
savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
{
uint32_t status;
int i;
for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
(status & 0xffff) == 0)
return 0;
DRM_UDELAY(1);
}
#if SAVAGE_BCI_DEBUG
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif
return DRM_ERR(EBUSY);
}
uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
unsigned int flags)
{
uint16_t count;
BCI_LOCALS;
if (dev_priv->status_ptr) {
/* coordinate with Xserver */
count = dev_priv->status_ptr[1023];
if (count < dev_priv->event_counter)
dev_priv->event_wrap++;
} else {
count = dev_priv->event_counter;
}
count = (count + 1) & 0xffff;
if (count == 0) {
count++; /* See the comment above savage_wait_event_*. */
dev_priv->event_wrap++;
}
dev_priv->event_counter = count;
if (dev_priv->status_ptr)
dev_priv->status_ptr[1023] = (uint32_t)count;
if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
unsigned int wait_cmd = BCI_CMD_WAIT;
if ((flags & SAVAGE_WAIT_2D))
wait_cmd |= BCI_CMD_WAIT_2D;
if ((flags & SAVAGE_WAIT_3D))
wait_cmd |= BCI_CMD_WAIT_3D;
BEGIN_BCI(2);
BCI_WRITE(wait_cmd);
} else {
BEGIN_BCI(1);
}
BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
return count;
}
/*
* Freelist management
*/
static int savage_freelist_init(drm_device_t *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_savage_buf_priv_t *entry;
int i;
DRM_DEBUG("count=%d\n", dma->buf_count);
dev_priv->head.next = &dev_priv->tail;
dev_priv->head.prev = NULL;
dev_priv->head.buf = NULL;
dev_priv->tail.next = NULL;
dev_priv->tail.prev = &dev_priv->head;
dev_priv->tail.buf = NULL;
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
entry = buf->dev_private;
SET_AGE(&entry->age, 0, 0);
entry->buf = buf;
entry->next = dev_priv->head.next;
entry->prev = &dev_priv->head;
dev_priv->head.next->prev = entry;
dev_priv->head.next = entry;
}
return 0;
}
static drm_buf_t *savage_freelist_get(drm_device_t *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
uint16_t event;
unsigned int wrap;
DRM_DEBUG("\n");
UPDATE_EVENT_COUNTER();
if (dev_priv->status_ptr)
event = dev_priv->status_ptr[1] & 0xffff;
else
event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
wrap = dev_priv->event_wrap;
if (event > dev_priv->event_counter)
wrap--; /* hardware hasn't passed the last wrap yet */
DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
DRM_DEBUG(" head=0x%04x %d\n", event, wrap);
if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
drm_savage_buf_priv_t *next = tail->next;
drm_savage_buf_priv_t *prev = tail->prev;
prev->next = next;
next->prev = prev;
tail->next = tail->prev = NULL;
return tail->buf;
}
DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
return NULL;
}
void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
if (entry->next != NULL || entry->prev != NULL) {
DRM_ERROR("entry already on freelist.\n");
return;
}
prev = &dev_priv->head;
next = prev->next;
prev->next = entry;
next->prev = entry;
entry->prev = prev;
entry->next = next;
}
/*
* Command DMA
*/
static int savage_dma_init(drm_savage_private_t *dev_priv)
{
unsigned int i;
dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
(SAVAGE_DMA_PAGE_SIZE*4);
dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
dev_priv->nr_dma_pages,
DRM_MEM_DRIVER);
if (dev_priv->dma_pages == NULL)
return DRM_ERR(ENOMEM);
for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
dev_priv->dma_pages[i].used = 0;
dev_priv->dma_pages[i].flushed = 0;
}
SET_AGE(&dev_priv->last_dma_age, 0, 0);
dev_priv->first_dma_page = 0;
dev_priv->current_dma_page = 0;
return 0;
}
void savage_dma_reset(drm_savage_private_t *dev_priv)
{
uint16_t event;
unsigned int wrap, i;
event = savage_bci_emit_event(dev_priv, 0);
wrap = dev_priv->event_wrap;
for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
dev_priv->dma_pages[i].used = 0;
dev_priv->dma_pages[i].flushed = 0;
}
SET_AGE(&dev_priv->last_dma_age, event, wrap);
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
{
uint16_t event;
unsigned int wrap;
/* Faked DMA buffer pages don't age. */
if (dev_priv->cmd_dma == &dev_priv->fake_dma)
return;
UPDATE_EVENT_COUNTER();
if (dev_priv->status_ptr)
event = dev_priv->status_ptr[1] & 0xffff;
else
event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
wrap = dev_priv->event_wrap;
if (event > dev_priv->event_counter)
wrap--; /* hardware hasn't passed the last wrap yet */
if (dev_priv->dma_pages[page].age.wrap > wrap ||
(dev_priv->dma_pages[page].age.wrap == wrap &&
dev_priv->dma_pages[page].age.event > event)) {
if (dev_priv->wait_evnt(dev_priv,
dev_priv->dma_pages[page].age.event)
< 0)
DRM_ERROR("wait_evnt failed!\n");
}
}
uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
{
unsigned int cur = dev_priv->current_dma_page;
unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
dev_priv->dma_pages[cur].used;
unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
SAVAGE_DMA_PAGE_SIZE;
uint32_t *dma_ptr;
unsigned int i;
DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
if (cur + nr_pages < dev_priv->nr_dma_pages) {
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur*SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[cur].used;
if (n < rest)
rest = n;
dev_priv->dma_pages[cur].used += rest;
n -= rest;
cur++;
} else {
dev_priv->dma_flush(dev_priv);
nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
dev_priv->dma_pages[i].used = 0;
dev_priv->dma_pages[i].flushed = 0;
}
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
dev_priv->first_dma_page = cur = 0;
}
for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
if (dev_priv->dma_pages[i].used) {
DRM_ERROR("unflushed page %u: used=%u\n",
i, dev_priv->dma_pages[i].used);
}
#endif
if (n > SAVAGE_DMA_PAGE_SIZE)
dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
else
dev_priv->dma_pages[i].used = n;
n -= SAVAGE_DMA_PAGE_SIZE;
}
dev_priv->current_dma_page = --i;
DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
i, dev_priv->dma_pages[i].used, n);
savage_dma_wait(dev_priv, dev_priv->current_dma_page);
return dma_ptr;
}
static void savage_dma_flush(drm_savage_private_t *dev_priv)
{
unsigned int first = dev_priv->first_dma_page;
unsigned int cur = dev_priv->current_dma_page;
uint16_t event;
unsigned int wrap, pad, align, len, i;
unsigned long phys_addr;
BCI_LOCALS;
if (first == cur &&
dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
return;
/* pad length to multiples of 2 entries
* align start of next DMA block to multiples of 8 entries */
pad = -dev_priv->dma_pages[cur].used & 1;
align = -(dev_priv->dma_pages[cur].used + pad) & 7;
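/* e.g. used = 13: pad = 1 rounds the block up to 14 entries, and
 * align = 2 makes the next block start at entry 16, a multiple of 8 */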
DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
"pad=%u, align=%u\n",
first, cur, dev_priv->dma_pages[first].flushed,
dev_priv->dma_pages[cur].used, pad, align);
/* pad with noops */
if (pad) {
uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur * SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[cur].used;
dev_priv->dma_pages[cur].used += pad;
while(pad != 0) {
*dma_ptr++ = BCI_CMD_WAIT;
pad--;
}
}
DRM_MEMORYBARRIER();
/* do flush ... */
phys_addr = dev_priv->cmd_dma->offset +
(first * SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[first].flushed) * 4;
len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
dev_priv->dma_pages[cur].used -
dev_priv->dma_pages[first].flushed;
DRM_DEBUG("phys_addr=%lx, len=%u\n",
phys_addr | dev_priv->dma_type, len);
BEGIN_BCI(3);
BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
BCI_WRITE(phys_addr | dev_priv->dma_type);
BCI_DMA(len);
/* fix alignment of the start of the next block */
dev_priv->dma_pages[cur].used += align;
/* age DMA pages */
event = savage_bci_emit_event(dev_priv, 0);
wrap = dev_priv->event_wrap;
for (i = first; i < cur; ++i) {
SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
dev_priv->dma_pages[i].used = 0;
dev_priv->dma_pages[i].flushed = 0;
}
/* age the current page only when it's full */
if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
dev_priv->dma_pages[cur].used = 0;
dev_priv->dma_pages[cur].flushed = 0;
/* advance to next page */
cur++;
if (cur == dev_priv->nr_dma_pages)
cur = 0;
dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
} else {
dev_priv->first_dma_page = cur;
dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
}
SET_AGE(&dev_priv->last_dma_age, event, wrap);
DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
dev_priv->dma_pages[cur].used,
dev_priv->dma_pages[cur].flushed);
}
static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
{
unsigned int i, j;
BCI_LOCALS;
if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
return;
DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
dev_priv->first_dma_page, dev_priv->current_dma_page,
dev_priv->dma_pages[dev_priv->current_dma_page].used);
for (i = dev_priv->first_dma_page;
i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
++i) {
uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
/* Sanity check: all pages except the last one must be full. */
if (i < dev_priv->current_dma_page &&
dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
DRM_ERROR("partial DMA page %u: used=%u",
i, dev_priv->dma_pages[i].used);
}
#endif
BEGIN_BCI(dev_priv->dma_pages[i].used);
for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
BCI_WRITE(dma_ptr[j]);
}
dev_priv->dma_pages[i].used = 0;
}
/* reset to first page */
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
/*
* Initialize mappings. On Savage4 and SavageIX the alignment
* and size of the aperture are not suitable for automatic MTRR setup
* in drm_addmap. Therefore we do it manually before the maps are
* initialized. We also need to take care of deleting the MTRRs in
* postcleanup.
*/
int savage_preinit(drm_device_t *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
unsigned long mmio_base, fb_base, fb_size, aperture_base;
/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
* in case we decide we need information on the BAR for BSD in the
* future.
*/
unsigned int fb_rsrc, aper_rsrc;
int ret = 0;
dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = (enum savage_family)chipset;
dev_priv->mtrr[0].handle = -1;
dev_priv->mtrr[1].handle = -1;
dev_priv->mtrr[2].handle = -1;
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
fb_rsrc = 0;
fb_base = drm_get_resource_start(dev, 0);
fb_size = SAVAGE_FB_SIZE_S3;
mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
aper_rsrc = 0;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
if (drm_get_resource_len(dev, 0) == 0x08000000) {
/* Don't make MMIO write-combining! We need 3
* MTRRs. */
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x01000000;
dev_priv->mtrr[0].handle = mtrr_add(
dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
MTRR_TYPE_WRCOMB, 1);
dev_priv->mtrr[1].base = fb_base+0x02000000;
dev_priv->mtrr[1].size = 0x02000000;
dev_priv->mtrr[1].handle = mtrr_add(
dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
MTRR_TYPE_WRCOMB, 1);
dev_priv->mtrr[2].base = fb_base+0x04000000;
dev_priv->mtrr[2].size = 0x04000000;
dev_priv->mtrr[2].handle = mtrr_add(
dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
MTRR_TYPE_WRCOMB, 1);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
drm_get_resource_len(dev, 0));
}
} else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
mmio_base = drm_get_resource_start(dev, 0);
fb_rsrc = 1;
fb_base = drm_get_resource_start(dev, 1);
fb_size = SAVAGE_FB_SIZE_S4;
aper_rsrc = 1;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
if (drm_get_resource_len(dev, 1) == 0x08000000) {
/* Can use one MTRR to cover both fb and
* aperture. */
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x08000000;
dev_priv->mtrr[0].handle = mtrr_add(
dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
MTRR_TYPE_WRCOMB, 1);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
drm_get_resource_len(dev, 1));
}
} else {
mmio_base = drm_get_resource_start(dev, 0);
fb_rsrc = 1;
fb_base = drm_get_resource_start(dev, 1);
fb_size = drm_get_resource_len(dev, 1);
aper_rsrc = 2;
aperture_base = drm_get_resource_start(dev, 2);
/* Automatic MTRR setup will do the right thing. */
}
ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
_DRM_READ_ONLY, &dev_priv->mmio);
if (ret)
return ret;
ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
_DRM_WRITE_COMBINING, &dev_priv->fb);
if (ret)
return ret;
ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
&dev_priv->aperture);
if (ret)
return ret;
return ret;
}
/*
* Delete MTRRs and free device-private data.
*/
int savage_postcleanup(drm_device_t *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
for (i = 0; i < 3; ++i)
if (dev_priv->mtrr[i].handle >= 0)
mtrr_del(dev_priv->mtrr[i].handle,
dev_priv->mtrr[i].base,
dev_priv->mtrr[i].size);
drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
return 0;
}
static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
{
drm_savage_private_t *dev_priv = dev->dev_private;
if (init->fb_bpp != 16 && init->fb_bpp != 32) {
DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
return DRM_ERR(EINVAL);
}
if (init->depth_bpp != 16 && init->depth_bpp != 32) {
DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
return DRM_ERR(EINVAL);
}
if (init->dma_type != SAVAGE_DMA_AGP &&
init->dma_type != SAVAGE_DMA_PCI) {
DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
return DRM_ERR(EINVAL);
}
dev_priv->cob_size = init->cob_size;
dev_priv->bci_threshold_lo = init->bci_threshold_lo;
dev_priv->bci_threshold_hi = init->bci_threshold_hi;
dev_priv->dma_type = init->dma_type;
dev_priv->fb_bpp = init->fb_bpp;
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
dev_priv->depth_bpp = init->depth_bpp;
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
dev_priv->texture_offset = init->texture_offset;
dev_priv->texture_size = init->texture_size;
DRM_GETSAREA();
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
if (init->status_offset != 0) {
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if (!dev_priv->status) {
DRM_ERROR("could not find shadow status region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
} else {
dev_priv->status = NULL;
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
dev->agp_buffer_map = drm_core_findmap(dev,
init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
}
}
if (init->agp_textures_offset) {
dev_priv->agp_textures =
drm_core_findmap(dev, init->agp_textures_offset);
if (!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
} else {
dev_priv->agp_textures = NULL;
}
if (init->cmd_dma_offset) {
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
DRM_ERROR("command DMA not supported on "
"Savage3D/MX/IX.\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
if (dev->dma && dev->dma->buflist) {
DRM_ERROR("command and vertex DMA not supported "
"at the same time.\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
if (!dev_priv->cmd_dma) {
DRM_ERROR("could not find command DMA region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
if (dev_priv->cmd_dma->type != _DRM_AGP) {
DRM_ERROR("AGP command DMA region is not a "
"_DRM_AGP map!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
drm_core_ioremap(dev_priv->cmd_dma, dev);
if (!dev_priv->cmd_dma->handle) {
DRM_ERROR("failed to ioremap command "
"DMA region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
}
} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
DRM_ERROR("PCI command DMA region is not a "
"_DRM_CONSISTENT map!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
}
} else {
dev_priv->cmd_dma = NULL;
}
dev_priv->dma_flush = savage_dma_flush;
if (!dev_priv->cmd_dma) {
DRM_DEBUG("falling back to faked command DMA.\n");
dev_priv->fake_dma.offset = 0;
dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
dev_priv->fake_dma.type = _DRM_SHM;
dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
DRM_MEM_DRIVER);
if (!dev_priv->fake_dma.handle) {
DRM_ERROR("could not allocate faked DMA buffer!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
}
dev_priv->cmd_dma = &dev_priv->fake_dma;
dev_priv->dma_flush = savage_fake_dma_flush;
}
dev_priv->sarea_priv =
(drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
init->sarea_priv_offset);
/* setup bitmap descriptors */
{
unsigned int color_tile_format;
unsigned int depth_tile_format;
unsigned int front_stride, back_stride, depth_stride;
if (dev_priv->chipset <= S3_SAVAGE4) {
color_tile_format = dev_priv->fb_bpp == 16 ?
SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
depth_tile_format = dev_priv->depth_bpp == 16 ?
SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
} else {
color_tile_format = SAVAGE_BD_TILE_DEST;
depth_tile_format = SAVAGE_BD_TILE_DEST;
}
front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp/8);
depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
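/* e.g. a 32 bpp front buffer with a 4096-byte pitch gives
 * front_stride = 4096 / (32/8) = 1024 pixels */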
dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
(color_tile_format << SAVAGE_BD_TILE_SHIFT);
dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
(color_tile_format << SAVAGE_BD_TILE_SHIFT);
dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
(dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
(depth_tile_format << SAVAGE_BD_TILE_SHIFT);
}
/* setup status and bci ptr */
dev_priv->event_counter = 0;
dev_priv->event_wrap = 0;
dev_priv->bci_ptr = (volatile uint32_t *)
((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
} else {
dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
}
if (dev_priv->status != NULL) {
dev_priv->status_ptr =
(volatile uint32_t *)dev_priv->status->handle;
dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
dev_priv->wait_evnt = savage_bci_wait_event_shadow;
dev_priv->status_ptr[1023] = dev_priv->event_counter;
} else {
dev_priv->status_ptr = NULL;
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
} else {
dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
}
dev_priv->wait_evnt = savage_bci_wait_event_reg;
}
/* cliprect functions */
if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
else
dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
if (savage_freelist_init(dev) < 0) {
DRM_ERROR("could not initialize freelist\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
}
if (savage_dma_init(dev_priv) < 0) {
DRM_ERROR("could not initialize command DMA\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
}
return 0;
}
int savage_do_cleanup_bci(drm_device_t *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
if (dev_priv->fake_dma.handle)
drm_free(dev_priv->fake_dma.handle,
SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
dev_priv->cmd_dma->type == _DRM_AGP &&
dev_priv->dma_type == SAVAGE_DMA_AGP)
drm_core_ioremapfree(dev_priv->cmd_dma, dev);
if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
dev->agp_buffer_map && dev->agp_buffer_map->handle) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
/* make sure the next instance (which may be running
* in PCI mode) doesn't try to use an old
* agp_buffer_map. */
dev->agp_buffer_map = NULL;
}
if (dev_priv->dma_pages)
drm_free(dev_priv->dma_pages,
sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
DRM_MEM_DRIVER);
return 0;
}
static int savage_bci_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_savage_init_t init;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
sizeof(init));
switch (init.func) {
case SAVAGE_INIT_BCI:
return savage_do_init_bci(dev, &init);
case SAVAGE_CLEANUP_BCI:
return savage_do_cleanup_bci(dev);
}
return DRM_ERR(EINVAL);
}
static int savage_bci_event_emit(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_event_emit_t event;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
sizeof(event));
event.count = savage_bci_emit_event(dev_priv, event.flags);
event.count |= dev_priv->event_wrap << 16;
DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
event.count, sizeof(event.count));
return 0;
}
static int savage_bci_event_wait(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_event_wait_t event;
unsigned int event_e, hw_e;
unsigned int event_w, hw_w;
DRM_DEBUG("\n");
DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
sizeof(event));
UPDATE_EVENT_COUNTER();
if (dev_priv->status_ptr)
hw_e = dev_priv->status_ptr[1] & 0xffff;
else
hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
hw_w = dev_priv->event_wrap;
if (hw_e > dev_priv->event_counter)
hw_w--; /* hardware hasn't passed the last wrap yet */
event_e = event.count & 0xffff;
event_w = event.count >> 16;
/* Don't need to wait if
* - event counter wrapped since the event was emitted or
* - the hardware has advanced up to or over the event to wait for.
*/
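	/* E.g. event_w == 2, event_e == 0x0010 with hw_w == 2,
	 * hw_e == 0x0012: same wrap and hw_e >= event_e, so the event
	 * has already passed and no wait is needed. */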
if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
return 0;
else
return dev_priv->wait_evnt(dev_priv, event_e);
}
/*
* DMA buffer management
*/
static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
{
drm_buf_t *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
buf = savage_freelist_get(dev);
if (!buf)
return DRM_ERR(EAGAIN);
buf->filp = filp;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return DRM_ERR(EFAULT);
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return DRM_ERR(EFAULT);
d->granted_count++;
}
return 0;
}
int savage_bci_buffers(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_dma_t d;
int ret = 0;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
/* Please don't send us buffers.
*/
if (d.send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d.send_count);
return DRM_ERR(EINVAL);
}
/* We'll send you buffers.
*/
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d.request_count, dma->buf_count);
return DRM_ERR(EINVAL);
}
d.granted_count = 0;
if (d.request_count) {
ret = savage_bci_get_buffers(filp, dev, &d);
}
DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
return ret;
}
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
{
drm_device_dma_t *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
if (!dma)
return;
if (!dev_priv)
return;
if (!dma->buflist)
return;
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[i];
drm_savage_buf_priv_t *buf_priv = buf->dev_private;
if (buf->filp == filp && buf_priv &&
buf_priv->next == NULL && buf_priv->prev == NULL) {
uint16_t event;
DRM_DEBUG("reclaimed from client\n");
event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
savage_freelist_put(dev, buf);
}
}
drm_core_reclaim_buffers(dev, filp);
}
drm_ioctl_desc_t savage_ioctls[] = {
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
/* savage_drm.h -- Public header for the savage driver
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __SAVAGE_DRM_H__
#define __SAVAGE_DRM_H__
#ifndef __SAVAGE_SAREA_DEFINES__
#define __SAVAGE_SAREA_DEFINES__
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define SAVAGE_CARD_HEAP 0
#define SAVAGE_AGP_HEAP 1
#define SAVAGE_NR_TEX_HEAPS 2
#define SAVAGE_NR_TEX_REGIONS 16
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
#endif /* __SAVAGE_SAREA_DEFINES__ */
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
/* Savage-specific ioctls
*/
#define DRM_SAVAGE_BCI_INIT 0x00
#define DRM_SAVAGE_BCI_CMDBUF 0x01
#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
#define SAVAGE_DMA_PCI 1
#define SAVAGE_DMA_AGP 3
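/* The dma_type value is OR'ed directly into the bus address written to
 * the vertex buffer address register, e.g. in savage_state.c:
 * BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type). */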
typedef struct drm_savage_init {
enum {
SAVAGE_INIT_BCI = 1,
SAVAGE_CLEANUP_BCI = 2
} func;
unsigned int sarea_priv_offset;
/* some parameters */
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* physical locations of non-permanent maps */
unsigned long status_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
unsigned long cmd_dma_offset;
} drm_savage_init_t;
typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
typedef struct drm_savage_cmdbuf {
/* command buffer in client's address space */
drm_savage_cmd_header_t __user *cmd_addr;
unsigned int size; /* size of the command buffer in 64bit units */
unsigned int dma_idx; /* DMA buffer index to use */
int discard; /* discard DMA buffer when done */
/* vertex buffer in client's address space */
unsigned int __user *vb_addr;
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
drm_clip_rect_t __user *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;
#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
typedef struct drm_savage_event {
unsigned int count;
unsigned int flags;
} drm_savage_event_emit_t, drm_savage_event_wait_t;
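/* Illustrative userspace sketch (not part of this header): emit an
 * event tag after submitting commands, then wait on it before the CPU
 * reuses the results, assuming an open DRM fd:
 *
 *	drm_savage_event_emit_t ev = { 0, SAVAGE_WAIT_3D };
 *	drmCommandWriteRead(fd, DRM_SAVAGE_BCI_EVENT_EMIT, &ev, sizeof(ev));
 *	drmCommandWrite(fd, DRM_SAVAGE_BCI_EVENT_WAIT, &ev, sizeof(ev));
 */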
/* Commands for the cmdbuf ioctl
*/
#define SAVAGE_CMD_STATE 0 /* a range of state registers */
#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices client vertex buffer */
#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
#define SAVAGE_CMD_SWAP 6 /* swap buffers */
/* Primitive types
*/
#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
* shading on s3d */
/* Skip flags (vertex format)
*/
#define SAVAGE_SKIP_Z 0x01
#define SAVAGE_SKIP_W 0x02
#define SAVAGE_SKIP_C0 0x04
#define SAVAGE_SKIP_C1 0x08
#define SAVAGE_SKIP_S0 0x10
#define SAVAGE_SKIP_T0 0x20
#define SAVAGE_SKIP_ST0 0x30
#define SAVAGE_SKIP_S1 0x40
#define SAVAGE_SKIP_T1 0x80
#define SAVAGE_SKIP_ST1 0xc0
#define SAVAGE_SKIP_ALL_S3D 0x3f
#define SAVAGE_SKIP_ALL_S4 0xff
/* Buffer names for clear command
*/
#define SAVAGE_FRONT 0x1
#define SAVAGE_BACK 0x2
#define SAVAGE_DEPTH 0x4
/* 64-bit command header
*/
union drm_savage_cmd_header {
struct {
unsigned char cmd; /* command */
unsigned char pad0;
unsigned short pad1;
unsigned short pad2;
unsigned short pad3;
} cmd; /* generic */
struct {
unsigned char cmd;
unsigned char global; /* need idle engine? */
unsigned short count; /* number of consecutive registers */
unsigned short start; /* first register */
unsigned short pad3;
} state; /* SAVAGE_CMD_STATE */
struct {
unsigned char cmd;
unsigned char prim; /* primitive type */
unsigned short skip; /* vertex format (skip flags) */
unsigned short count; /* number of vertices */
unsigned short start; /* first vertex in DMA/vertex buffer */
} prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
struct {
unsigned char cmd;
unsigned char prim;
unsigned short skip;
unsigned short count; /* number of indices that follow */
unsigned short pad3;
} idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
struct {
unsigned char cmd;
unsigned char pad0;
unsigned short pad1;
unsigned int flags;
} clear0; /* SAVAGE_CMD_CLEAR */
struct {
unsigned int mask;
unsigned int value;
} clear1; /* SAVAGE_CMD_CLEAR data */
};
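/* Illustrative sketch (hypothetical userspace code, not part of this
 * header): building a minimal command buffer with a single swap
 * command, assuming an open DRM fd and one clip rect in "box":
 *
 *	drm_savage_cmd_header_t cmd;
 *	drm_savage_cmdbuf_t cb;
 *	memset(&cmd, 0, sizeof(cmd));
 *	memset(&cb, 0, sizeof(cb));
 *	cmd.cmd.cmd = SAVAGE_CMD_SWAP;
 *	cb.cmd_addr = &cmd;
 *	cb.size = 1;			(size is in 64-bit units)
 *	cb.box_addr = &box;
 *	cb.nbox = 1;
 *	drmCommandWrite(fd, DRM_SAVAGE_BCI_CMDBUF, &cb, sizeof(cb));
 */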
#endif
/* savage_drv.c -- Savage driver for Linux
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->primary.minor,
pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
return 0;
}
static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
extern drm_ioctl_desc_t savage_ioctls[];
extern int savage_max_ioctl;
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.preinit = savage_preinit,
.postinit = postinit,
.postcleanup = savage_postcleanup,
.reclaim_buffers = savage_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.version = version,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
}
};
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
return drm_init(&driver);
}
static void __exit savage_exit(void)
{
drm_exit(&driver);
}
module_init(savage_init);
module_exit(savage_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL and additional rights");
/* savage_drv.h -- Private header for the savage driver
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __SAVAGE_DRV_H__
#define __SAVAGE_DRV_H__
#define DRIVER_AUTHOR "Felix Kuehling"
#define DRIVER_NAME "savage"
#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
#define DRIVER_DATE "20050313"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 1
/* Interface history:
*
* 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
* 2.0 The first real DRM
* 2.1 Scissors registers managed by the DRM, 3D operations clipped by
* cliprects of the cmdbuf ioctl
* 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
* 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
* wide and thus very long lived (unlikely to ever wrap). The size
* in the struct was 32 bits before, but only 16 bits were used
* 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
* actually used
*/
typedef struct drm_savage_age {
uint16_t event;
unsigned int wrap;
} drm_savage_age_t;
typedef struct drm_savage_buf_priv {
struct drm_savage_buf_priv *next;
struct drm_savage_buf_priv *prev;
drm_savage_age_t age;
drm_buf_t *buf;
} drm_savage_buf_priv_t;
typedef struct drm_savage_dma_page {
drm_savage_age_t age;
unsigned int used, flushed;
} drm_savage_dma_page_t;
#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
* size of 16kbytes or 4k entries. Minimum requirement would be
* 10kbytes for 255 40-byte vertices in one drawing command. */
#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
/* interesting bits of hardware state that are saved in dev_priv */
typedef union {
struct drm_savage_common_state {
uint32_t vbaddr;
} common;
struct {
unsigned char pad[sizeof(struct drm_savage_common_state)];
uint32_t texctrl, texaddr;
uint32_t scstart, new_scstart;
uint32_t scend, new_scend;
} s3d;
struct {
unsigned char pad[sizeof(struct drm_savage_common_state)];
uint32_t texdescr, texaddr0, texaddr1;
uint32_t drawctrl0, new_drawctrl0;
uint32_t drawctrl1, new_drawctrl1;
} s4;
} drm_savage_state_t;
/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
enum savage_family {
S3_UNKNOWN = 0,
S3_SAVAGE3D,
S3_SAVAGE_MX,
S3_SAVAGE4,
S3_PROSAVAGE,
S3_TWISTER,
S3_PROSAVAGEDDR,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
S3_LAST
};
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
|| (chip==S3_PROSAVAGE) \
|| (chip==S3_TWISTER) \
|| (chip==S3_PROSAVAGEDDR))
#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
||(chip==S3_PROSAVAGEDDR))
/* flags */
#define SAVAGE_IS_AGP 1
typedef struct drm_savage_private {
drm_savage_sarea_t *sarea_priv;
drm_savage_buf_priv_t head, tail;
/* who am I? */
enum savage_family chipset;
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* bitmap descriptors for swap and clear */
unsigned int front_bd, back_bd, depth_bd;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* memory regions in physical memory */
drm_local_map_t *sarea;
drm_local_map_t *mmio;
drm_local_map_t *fb;
drm_local_map_t *aperture;
drm_local_map_t *status;
drm_local_map_t *agp_textures;
drm_local_map_t *cmd_dma;
drm_local_map_t fake_dma;
struct {
int handle;
unsigned long base, size;
} mtrr[3];
/* BCI and status-related stuff */
volatile uint32_t *status_ptr, *bci_ptr;
uint32_t status_used_mask;
uint16_t event_counter;
unsigned int event_wrap;
/* Savage4 command DMA */
drm_savage_dma_page_t *dma_pages;
unsigned int nr_dma_pages, first_dma_page, current_dma_page;
drm_savage_age_t last_dma_age;
/* saved hw state for global/local check on S3D */
uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
/* and for scissors (global, so don't emit if not changed) */
uint32_t hw_scissors_start, hw_scissors_end;
drm_savage_state_t state;
/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
unsigned int waiting;
/* config/hardware-dependent function pointers */
int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
drm_clip_rect_t *pbox);
void (*dma_flush)(struct drm_savage_private *dev_priv);
} drm_savage_private_t;
/* ioctls */
extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
extern int savage_bci_buffers(DRM_IOCTL_ARGS);
/* BCI functions */
extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
unsigned int flags);
extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
extern void savage_dma_reset(drm_savage_private_t *dev_priv);
extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
unsigned int n);
extern int savage_preinit(drm_device_t *dev, unsigned long chipset);
extern int savage_postcleanup(drm_device_t *dev);
extern int savage_do_cleanup_bci(drm_device_t *dev);
extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
drm_clip_rect_t *pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
drm_clip_rect_t *pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
* inside the MMIO region */
#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
* BCI FIFO */
/*
* MMIO registers
*/
#define SAVAGE_STATUS_WORD0 0x48C00
#define SAVAGE_STATUS_WORD1 0x48C04
#define SAVAGE_ALT_STATUS_WORD0 0x48C60
#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
/* Copied from savage_bci.h in the 2D driver with some renaming. */
/* Bitmap descriptors */
#define SAVAGE_BD_STRIDE_SHIFT 0
#define SAVAGE_BD_BPP_SHIFT 16
#define SAVAGE_BD_TILE_SHIFT 24
#define SAVAGE_BD_BW_DISABLE (1<<28)
/* common: */
#define SAVAGE_BD_TILE_LINEAR 0
/* savage4, MX, IX, 3D */
#define SAVAGE_BD_TILE_16BPP 2
#define SAVAGE_BD_TILE_32BPP 3
/* twister, prosavage, DDR, supersavage, 2000 */
#define SAVAGE_BD_TILE_DEST 1
#define SAVAGE_BD_TILE_TEXTURE 2
/* GBD - BCI enable */
/* savage4, MX, IX, 3D */
#define SAVAGE_GBD_BCI_ENABLE 8
/* twister, prosavage, DDR, supersavage, 2000 */
#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
#define SAVAGE_GBD_BIG_ENDIAN 4
#define SAVAGE_GBD_LITTLE_ENDIAN 0
#define SAVAGE_GBD_64 1
/* Global Bitmap Descriptor */
#define SAVAGE_BCI_GLB_BD_LOW 0x8168
#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
/*
* BCI registers
*/
/* Savage4/Twister/ProSavage 3D registers */
#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
#define SAVAGE_TEXPALADDR_S4 0x1f
#define SAVAGE_TEXCTRL0_S4 0x20
#define SAVAGE_TEXCTRL1_S4 0x21
#define SAVAGE_TEXADDR0_S4 0x22
#define SAVAGE_TEXADDR1_S4 0x23
#define SAVAGE_TEXBLEND0_S4 0x24
#define SAVAGE_TEXBLEND1_S4 0x25
#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
#define SAVAGE_TEXDESCR_S4 0x27
#define SAVAGE_FOGTABLE_S4 0x28
#define SAVAGE_FOGCTRL_S4 0x30
#define SAVAGE_STENCILCTRL_S4 0x31
#define SAVAGE_ZBUFCTRL_S4 0x32
#define SAVAGE_ZBUFOFF_S4 0x33
#define SAVAGE_DESTCTRL_S4 0x34
#define SAVAGE_DRAWCTRL0_S4 0x35
#define SAVAGE_DRAWCTRL1_S4 0x36
#define SAVAGE_ZWATERMARK_S4 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
#define SAVAGE_TEXBLENDCOLOR_S4 0x39
/* Savage3D/MX/IX 3D registers */
#define SAVAGE_TEXPALADDR_S3D 0x18
#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
#define SAVAGE_TEXADDR_S3D 0x1A
#define SAVAGE_TEXDESCR_S3D 0x1B
#define SAVAGE_TEXCTRL_S3D 0x1C
#define SAVAGE_FOGTABLE_S3D 0x20
#define SAVAGE_FOGCTRL_S3D 0x30
#define SAVAGE_DRAWCTRL_S3D 0x31
#define SAVAGE_ZBUFCTRL_S3D 0x32
#define SAVAGE_ZBUFOFF_S3D 0x33
#define SAVAGE_DESTCTRL_S3D 0x34
#define SAVAGE_SCSTART_S3D 0x35
#define SAVAGE_SCEND_S3D 0x36
#define SAVAGE_ZWATERMARK_S3D 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
/* common stuff */
#define SAVAGE_VERTBUFADDR 0x3e
#define SAVAGE_BITPLANEWTMASK 0xd7
#define SAVAGE_DMABUFADDR 0x51
/* texture enable bits (needed for tex addr checking) */
#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
/* Global fields in Savage4/Twister/ProSavage 3D registers:
*
* All texture registers and DrawLocalCtrl are local. All other
* registers are global. */
/* Global fields in Savage3D/MX/IX 3D registers:
*
* All texture registers are local. DrawCtrl and ZBufCtrl are
* partially local. All other registers are global.
*
* DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
* ZBufCtrl global fields: zCmpFunc, zBufEn
*/
#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
*/
#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
/*
* BCI commands
*/
#define BCI_CMD_NOP 0x40000000
#define BCI_CMD_RECT 0x48000000
#define BCI_CMD_RECT_XP 0x01000000
#define BCI_CMD_RECT_YP 0x02000000
#define BCI_CMD_SCANLINE 0x50000000
#define BCI_CMD_LINE 0x5C000000
#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
#define BCI_CMD_BYTE_TEXT 0x63000000
#define BCI_CMD_NT_BYTE_TEXT 0x67000000
#define BCI_CMD_BIT_TEXT 0x6C000000
#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= (((rop) & 0xFF) << 16))
#define BCI_CMD_SEND_COLOR 0x00008000
#define BCI_CMD_CLIP_NONE 0x00000000
#define BCI_CMD_CLIP_CURRENT 0x00002000
#define BCI_CMD_CLIP_LR 0x00004000
#define BCI_CMD_CLIP_NEW 0x00006000
#define BCI_CMD_DEST_GBD 0x00000000
#define BCI_CMD_DEST_PBD 0x00000800
#define BCI_CMD_DEST_PBD_NEW 0x00000C00
#define BCI_CMD_DEST_SBD 0x00001000
#define BCI_CMD_DEST_SBD_NEW 0x00001400
#define BCI_CMD_SRC_TRANSPARENT 0x00000200
#define BCI_CMD_SRC_SOLID 0x00000000
#define BCI_CMD_SRC_GBD 0x00000020
#define BCI_CMD_SRC_COLOR 0x00000040
#define BCI_CMD_SRC_MONO 0x00000060
#define BCI_CMD_SRC_PBD_COLOR 0x00000080
#define BCI_CMD_SRC_PBD_MONO 0x000000A0
#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
#define BCI_CMD_SRC_SBD_COLOR 0x00000100
#define BCI_CMD_SRC_SBD_MONO 0x00000120
#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
#define BCI_CMD_PAT_TRANSPARENT 0x00000010
#define BCI_CMD_PAT_NONE 0x00000000
#define BCI_CMD_PAT_COLOR 0x00000002
#define BCI_CMD_PAT_MONO 0x00000003
#define BCI_CMD_PAT_PBD_COLOR 0x00000004
#define BCI_CMD_PAT_PBD_MONO 0x00000005
#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
#define BCI_CMD_PAT_SBD_COLOR 0x00000008
#define BCI_CMD_PAT_SBD_MONO 0x00000009
#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
#define BCI_BD_BW_DISABLE 0x10000000
#define BCI_BD_TILE_MASK 0x03000000
#define BCI_BD_TILE_NONE 0x00000000
#define BCI_BD_TILE_16 0x02000000
#define BCI_BD_TILE_32 0x03000000
#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
#define BCI_CMD_SET_REGISTER 0x96000000
#define BCI_CMD_WAIT 0xC0000000
#define BCI_CMD_WAIT_3D 0x00010000
#define BCI_CMD_WAIT_2D 0x00020000
#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
#define BCI_CMD_DRAW_PRIM 0x80000000
#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
#define BCI_CMD_DRAW_CONT 0x01000000
#define BCI_CMD_DRAW_TRILIST 0x00000000
#define BCI_CMD_DRAW_TRISTRIP 0x02000000
#define BCI_CMD_DRAW_TRIFAN 0x04000000
#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
#define BCI_CMD_DRAW_NO_Z 0x00000001
#define BCI_CMD_DRAW_NO_W 0x00000002
#define BCI_CMD_DRAW_NO_CD 0x00000004
#define BCI_CMD_DRAW_NO_CS 0x00000008
#define BCI_CMD_DRAW_NO_U0 0x00000010
#define BCI_CMD_DRAW_NO_V0 0x00000020
#define BCI_CMD_DRAW_NO_UV0 0x00000030
#define BCI_CMD_DRAW_NO_U1 0x00000040
#define BCI_CMD_DRAW_NO_V1 0x00000080
#define BCI_CMD_DRAW_NO_UV1 0x000000c0
#define BCI_CMD_DMA 0xa8000000
#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
(((maj) & 0x1FFF) | \
((ym) ? 1<<13 : 0) | \
((xp) ? 1<<14 : 0) | \
((yp) ? 1<<15 : 0) | \
((err) << 16))
/*
* common commands
*/
#define BCI_SET_REGISTERS( first, n ) \
BCI_WRITE(BCI_CMD_SET_REGISTER | \
((uint32_t)(n) & 0xff) << 16 | \
((uint32_t)(first) & 0xffff))
#define DMA_SET_REGISTERS( first, n ) \
DMA_WRITE(BCI_CMD_SET_REGISTER | \
((uint32_t)(n) & 0xff) << 16 | \
((uint32_t)(first) & 0xffff))
#define BCI_DRAW_PRIMITIVE(n, type, skip) \
BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
((n) << 16))
#define DMA_DRAW_PRIMITIVE(n, type, skip) \
DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
((n) << 16))
#define BCI_DRAW_INDICES_S3D(n, type, i0) \
BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
((n) << 16) | (i0))
#define BCI_DRAW_INDICES_S4(n, type, skip) \
BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
(skip) | ((n) << 16))
#define BCI_DMA(n) \
BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
/*
* access to MMIO
*/
#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
/*
* access to the burst command interface (BCI)
*/
#define SAVAGE_BCI_DEBUG 1
#define BCI_LOCALS volatile uint32_t *bci_ptr;
#define BEGIN_BCI( n ) do { \
dev_priv->wait_fifo(dev_priv, (n)); \
bci_ptr = dev_priv->bci_ptr; \
} while(0)
#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
#define BCI_COPY_FROM_USER(src,n) do { \
unsigned int i; \
for (i = 0; i < n; ++i) { \
uint32_t val; \
DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
BCI_WRITE(val); \
} \
} while(0)
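/* Typical BCI usage in this driver (sketch): reserve FIFO space, then
 * write command and data words, e.g. to set the vertex buffer address:
 *
 *	BCI_LOCALS;
 *	BEGIN_BCI(2);
 *	BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
 *	BCI_WRITE(bus_address | dev_priv->dma_type);
 */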
/*
* command DMA support
*/
#define SAVAGE_DMA_DEBUG 1
#define DMA_LOCALS uint32_t *dma_ptr;
#define BEGIN_DMA( n ) do { \
unsigned int cur = dev_priv->current_dma_page; \
unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
dev_priv->dma_pages[cur].used; \
if ((n) > rest) { \
dma_ptr = savage_dma_alloc(dev_priv, (n)); \
} else { /* fast path for small allocations */ \
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
cur * SAVAGE_DMA_PAGE_SIZE + \
dev_priv->dma_pages[cur].used; \
if (dev_priv->dma_pages[cur].used == 0) \
savage_dma_wait(dev_priv, cur); \
dev_priv->dma_pages[cur].used += (n); \
} \
} while(0)
#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
#define DMA_COPY_FROM_USER(src,n) do { \
DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
dma_ptr += n; \
} while(0)
#if SAVAGE_DMA_DEBUG
#define DMA_COMMIT() do { \
unsigned int cur = dev_priv->current_dma_page; \
uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
cur * SAVAGE_DMA_PAGE_SIZE + \
dev_priv->dma_pages[cur].used; \
if (dma_ptr != expected) { \
DRM_ERROR("DMA allocation and use don't match: " \
"%p != %p\n", expected, dma_ptr); \
savage_dma_reset(dev_priv); \
} \
} while(0)
#else
#define DMA_COMMIT() do {/* nothing */} while(0)
#endif
#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
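/* Typical command-DMA usage (sketch), as in savage_emit_clip_rect_s3d:
 *
 *	DMA_LOCALS;
 *	BEGIN_DMA(4);
 *	DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
 *	DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
 *	DMA_WRITE(scstart);
 *	DMA_WRITE(scend);
 *	DMA_COMMIT();
 */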
/* Buffer aging via event tag
*/
#define UPDATE_EVENT_COUNTER( ) do { \
if (dev_priv->status_ptr) { \
uint16_t count; \
/* coordinate with Xserver */ \
count = dev_priv->status_ptr[1023]; \
if (count < dev_priv->event_counter) \
dev_priv->event_wrap++; \
dev_priv->event_counter = count; \
} \
} while(0)
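/* E.g. reading a shadow count of 0x0005 when event_counter was 0xfffe
 * means the 16-bit tag wrapped since the last update, so event_wrap
 * gets incremented. */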
#define SET_AGE( age, e, w ) do { \
(age)->event = e; \
(age)->wrap = w; \
} while(0)
#define TEST_AGE( age, e, w ) \
( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
#endif /* __SAVAGE_DRV_H__ */
/* savage_state.c -- State and drawing support for Savage
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
drm_clip_rect_t *pbox)
{
uint32_t scstart = dev_priv->state.s3d.new_scstart;
uint32_t scend = dev_priv->state.s3d.new_scend;
scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 16) & 0x07ff0000);
scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
(((uint32_t)pbox->x2-1) & 0x000007ff) |
((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
if (scstart != dev_priv->state.s3d.scstart ||
scend != dev_priv->state.s3d.scend) {
DMA_LOCALS;
BEGIN_DMA(4);
DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
DMA_WRITE(scstart);
DMA_WRITE(scend);
dev_priv->state.s3d.scstart = scstart;
dev_priv->state.s3d.scend = scend;
dev_priv->waiting = 1;
DMA_COMMIT();
}
}
void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
drm_clip_rect_t *pbox)
{
uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 12) & 0x00fff000);
drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
(((uint32_t)pbox->x2-1) & 0x000007ff) |
((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
drawctrl1 != dev_priv->state.s4.drawctrl1) {
DMA_LOCALS;
BEGIN_DMA(4);
DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
DMA_WRITE(drawctrl0);
DMA_WRITE(drawctrl1);
dev_priv->state.s4.drawctrl0 = drawctrl0;
dev_priv->state.s4.drawctrl1 = drawctrl1;
dev_priv->waiting = 1;
DMA_COMMIT();
}
}
static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
uint32_t addr)
{
if ((addr & 6) != 2) { /* reserved bits */
DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
return DRM_ERR(EINVAL);
}
if (!(addr & 1)) { /* local */
addr &= ~7;
if (addr < dev_priv->texture_offset ||
addr >= dev_priv->texture_offset+dev_priv->texture_size) {
DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
unit, addr);
return DRM_ERR(EINVAL);
}
} else { /* AGP */
if (!dev_priv->agp_textures) {
DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
unit, addr);
return DRM_ERR(EINVAL);
}
addr &= ~7;
if (addr < dev_priv->agp_textures->offset ||
addr >= (dev_priv->agp_textures->offset +
dev_priv->agp_textures->size)) {
DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
unit, addr);
return DRM_ERR(EINVAL);
}
}
return 0;
}
#define SAVE_STATE(reg,where) \
	if (start <= reg && start+count > reg) \
DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
#define SAVE_STATE_MASK(reg,where,mask) do { \
	if (start <= reg && start+count > reg) { \
uint32_t tmp; \
DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \
dev_priv->state.where = (tmp & (mask)) | \
(dev_priv->state.where & ~(mask)); \
} \
} while (0)
static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
unsigned int start, unsigned int count,
const uint32_t __user *regs)
{
if (start < SAVAGE_TEXPALADDR_S3D ||
start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1);
return DRM_ERR(EINVAL);
}
SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
~SAVAGE_SCISSOR_MASK_S3D);
SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
~SAVAGE_SCISSOR_MASK_S3D);
/* if any texture regs were changed ... */
if (start <= SAVAGE_TEXCTRL_S3D &&
start+count > SAVAGE_TEXPALADDR_S3D) {
/* ... check texture state */
SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
return savage_verify_texaddr(
dev_priv, 0, dev_priv->state.s3d.texaddr);
}
return 0;
}
static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
unsigned int start, unsigned int count,
const uint32_t __user *regs)
{
int ret = 0;
if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1);
return DRM_ERR(EINVAL);
}
SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
~SAVAGE_SCISSOR_MASK_S4);
SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
~SAVAGE_SCISSOR_MASK_S4);
/* if any texture regs were changed ... */
if (start <= SAVAGE_TEXDESCR_S4 &&
start+count > SAVAGE_TEXPALADDR_S4) {
/* ... check texture state */
SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
ret |= savage_verify_texaddr(
dev_priv, 0, dev_priv->state.s4.texaddr0);
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
ret |= savage_verify_texaddr(
dev_priv, 1, dev_priv->state.s4.texaddr1);
}
return ret;
}
#undef SAVE_STATE
#undef SAVE_STATE_MASK
static int savage_dispatch_state(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint32_t __user *regs)
{
unsigned int count = cmd_header->state.count;
unsigned int start = cmd_header->state.start;
unsigned int count2 = 0;
unsigned int bci_size;
int ret;
DMA_LOCALS;
if (!count)
return 0;
if (DRM_VERIFYAREA_READ(regs, count*4))
return DRM_ERR(EFAULT);
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
ret = savage_verify_state_s3d(dev_priv, start, count, regs);
if (ret != 0)
return ret;
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_SCSTART_S3D) {
if (start+count > SAVAGE_SCEND_S3D+1)
count2 = count - (SAVAGE_SCEND_S3D+1 - start);
if (start+count > SAVAGE_SCSTART_S3D)
count = SAVAGE_SCSTART_S3D - start;
} else if (start <= SAVAGE_SCEND_S3D) {
if (start+count > SAVAGE_SCEND_S3D+1) {
count -= SAVAGE_SCEND_S3D+1 - start;
start = SAVAGE_SCEND_S3D+1;
} else
return 0;
}
} else {
ret = savage_verify_state_s4(dev_priv, start, count, regs);
if (ret != 0)
return ret;
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_DRAWCTRL0_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1)
count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
if (start+count > SAVAGE_DRAWCTRL0_S4)
count = SAVAGE_DRAWCTRL0_S4 - start;
} else if (start <= SAVAGE_DRAWCTRL1_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
count -= SAVAGE_DRAWCTRL1_S4+1 - start;
start = SAVAGE_DRAWCTRL1_S4+1;
} else
return 0;
}
}
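	/* Each run of up to 255 registers costs one extra
	 * BCI_CMD_SET_REGISTER header word, e.g. count == 300 emits
	 * 300 data words plus 2 headers. */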
bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
if (cmd_header->state.global) {
BEGIN_DMA(bci_size+1);
DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
dev_priv->waiting = 1;
} else {
BEGIN_DMA(bci_size);
}
do {
while (count > 0) {
unsigned int n = count < 255 ? count : 255;
DMA_SET_REGISTERS(start, n);
DMA_COPY_FROM_USER(regs, n);
count -= n;
start += n;
regs += n;
}
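		/* skip the two scissor registers excluded above */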
start += 2;
regs += 2;
count = count2;
count2 = 0;
} while (count);
DMA_COMMIT();
return 0;
}
static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const drm_buf_t *dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->prim.prim;
unsigned int skip = cmd_header->prim.skip;
unsigned int n = cmd_header->prim.count;
unsigned int start = cmd_header->prim.start;
unsigned int i;
BCI_LOCALS;
if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n");
return DRM_ERR(EINVAL);
}
if (!n)
return 0;
switch (prim) {
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
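		/* fall through */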
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
n);
return DRM_ERR(EINVAL);
}
break;
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
return DRM_ERR(EINVAL);
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
return DRM_ERR(EINVAL);
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
return DRM_ERR(EINVAL);
}
}
	if (start + n > dmabuf->total/32) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, dmabuf->total/32 - 1);
return DRM_ERR(EINVAL);
}
/* Vertex DMA doesn't work with command DMA at the same time,
* so we use BCI_... to submit commands here. Flush buffered
* faked DMA first. */
DMA_FLUSH();
if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
BEGIN_BCI(2);
BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
dev_priv->state.common.vbaddr = dmabuf->bus_address;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
/* Workaround for what looks like a hardware bug. If a
* WAIT_3D_IDLE was emitted some time before the
* indexed drawing command then the engine will lock
* up. There are two known workarounds:
* WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
BEGIN_BCI(63);
for (i = 0; i < 63; ++i)
BCI_WRITE(BCI_CMD_WAIT);
dev_priv->waiting = 0;
}
prim <<= 25;
while (n != 0) {
/* Can emit up to 255 indices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
if (reorder) {
/* Need to reorder indices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
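			/* E.g. with start == 0 the first triangle's
			 * indices are emitted in the order 2, 0, 1,
			 * hence the name TRILIST_201. */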
int reorder[3] = {-1, -1, -1};
reorder[start%3] = 2;
BEGIN_BCI((count+1+1)/2);
BCI_DRAW_INDICES_S3D(count, prim, start+2);
for (i = start+1; i+1 < start+count; i += 2)
BCI_WRITE((i + reorder[i % 3]) |
((i+1 + reorder[(i+1) % 3]) << 16));
if (i < start+count)
BCI_WRITE(i + reorder[i%3]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
BEGIN_BCI((count+1+1)/2);
BCI_DRAW_INDICES_S3D(count, prim, start);
for (i = start+1; i+1 < start+count; i += 2)
BCI_WRITE(i | ((i+1) << 16));
if (i < start+count)
BCI_WRITE(i);
} else {
BEGIN_BCI((count+2+1)/2);
BCI_DRAW_INDICES_S4(count, prim, skip);
for (i = start; i+1 < start+count; i += 2)
BCI_WRITE(i | ((i+1) << 16));
if (i < start+count)
BCI_WRITE(i);
}
start += count;
n -= count;
prim |= BCI_CMD_DRAW_CONT;
}
return 0;
}
static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint32_t __user *vtxbuf,
unsigned int vb_size,
unsigned int vb_stride)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->prim.prim;
unsigned int skip = cmd_header->prim.skip;
unsigned int n = cmd_header->prim.count;
unsigned int start = cmd_header->prim.start;
unsigned int vtx_size;
unsigned int i;
DMA_LOCALS;
if (!n)
return 0;
switch (prim) {
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
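		/* fall through */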
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
n);
return DRM_ERR(EINVAL);
}
break;
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
}
vtx_size = 10; /* full vertex */
}
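	/* Each set skip flag removes one dword from the vertex, e.g.
	 * skip == SAVAGE_SKIP_ST1 (0xc0) yields an 8-dword vertex on
	 * Savage4. */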
vtx_size -= (skip & 1) + (skip >> 1 & 1) +
(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
return DRM_ERR(EINVAL);
}
	if (start + n > vb_size / (vb_stride*4)) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, vb_size / (vb_stride*4) - 1);
return DRM_ERR(EINVAL);
}
prim <<= 25;
while (n != 0) {
/* Can emit up to 255 vertices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
if (reorder) {
/* Need to reorder vertices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {-1, -1, -1};
reorder[start%3] = 2;
BEGIN_DMA(count*vtx_size+1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = start; i < start+count; ++i) {
unsigned int j = i + reorder[i % 3];
DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
vtx_size);
}
DMA_COMMIT();
} else {
BEGIN_DMA(count*vtx_size+1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
vtx_size*count);
} else {
for (i = start; i < start+count; ++i) {
DMA_COPY_FROM_USER(
&vtxbuf[vb_stride*i],
vtx_size);
}
}
DMA_COMMIT();
}
start += count;
n -= count;
prim |= BCI_CMD_DRAW_CONT;
}
return 0;
}
static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint16_t __user *usr_idx,
const drm_buf_t *dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->idx.prim;
unsigned int skip = cmd_header->idx.skip;
unsigned int n = cmd_header->idx.count;
unsigned int i;
BCI_LOCALS;
if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n");
return DRM_ERR(EINVAL);
}
if (!n)
return 0;
switch (prim) {
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
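		/* fall through */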
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n",
n);
return DRM_ERR(EINVAL);
}
break;
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
return DRM_ERR(EINVAL);
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
skip);
return DRM_ERR(EINVAL);
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
return DRM_ERR(EINVAL);
}
}
/* Vertex DMA doesn't work with command DMA at the same time,
* so we use BCI_... to submit commands here. Flush buffered
* faked DMA first. */
DMA_FLUSH();
if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
BEGIN_BCI(2);
BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
dev_priv->state.common.vbaddr = dmabuf->bus_address;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
/* Workaround for what looks like a hardware bug. If a
* WAIT_3D_IDLE was emitted some time before the
* indexed drawing command then the engine will lock
* up. There are two known workarounds:
* WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
BEGIN_BCI(63);
for (i = 0; i < 63; ++i)
BCI_WRITE(BCI_CMD_WAIT);
dev_priv->waiting = 0;
}
prim <<= 25;
while (n != 0) {
/* Can emit up to 255 indices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
/* Is it ok to allocate 510 bytes on the stack in an ioctl? */
uint16_t idx[255];
/* Copy and check indices */
DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
for (i = 0; i < count; ++i) {
			if (idx[i] >= dmabuf->total/32) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], dmabuf->total/32 - 1);
return DRM_ERR(EINVAL);
}
}
if (reorder) {
/* Need to reorder indices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {2, -1, -1};
BEGIN_BCI((count+1+1)/2);
BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
for (i = 1; i+1 < count; i += 2)
BCI_WRITE(idx[i + reorder[i % 3]] |
(idx[i+1 + reorder[(i+1) % 3]] << 16));
if (i < count)
BCI_WRITE(idx[i + reorder[i%3]]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
BEGIN_BCI((count+1+1)/2);
BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
for (i = 1; i+1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i+1] << 16));
if (i < count)
BCI_WRITE(idx[i]);
} else {
BEGIN_BCI((count+2+1)/2);
BCI_DRAW_INDICES_S4(count, prim, skip);
for (i = 0; i+1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i+1] << 16));
if (i < count)
BCI_WRITE(idx[i]);
}
usr_idx += count;
n -= count;
prim |= BCI_CMD_DRAW_CONT;
}
return 0;
}
static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint16_t __user *usr_idx,
const uint32_t __user *vtxbuf,
unsigned int vb_size,
unsigned int vb_stride)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->idx.prim;
unsigned int skip = cmd_header->idx.skip;
unsigned int n = cmd_header->idx.count;
unsigned int vtx_size;
unsigned int i;
DMA_LOCALS;
if (!n)
return 0;
switch (prim) {
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
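		/* fall through */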
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n",
n);
return DRM_ERR(EINVAL);
}
break;
case SAVAGE_PRIM_TRISTRIP:
case SAVAGE_PRIM_TRIFAN:
if (n < 3) {
DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
}
vtx_size = 10; /* full vertex */
}
vtx_size -= (skip & 1) + (skip >> 1 & 1) +
(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
return DRM_ERR(EINVAL);
}
prim <<= 25;
while (n != 0) {
/* Can emit up to 255 vertices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
/* Is it ok to allocate 510 bytes on the stack in an ioctl? */
uint16_t idx[255];
/* Copy and check indices */
DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
for (i = 0; i < count; ++i) {
			if (idx[i] >= vb_size / (vb_stride*4)) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], vb_size / (vb_stride*4) - 1);
return DRM_ERR(EINVAL);
}
}
if (reorder) {
/* Need to reorder vertices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {2, -1, -1};
BEGIN_DMA(count*vtx_size+1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) {
unsigned int j = idx[i + reorder[i % 3]];
DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
vtx_size);
}
DMA_COMMIT();
} else {
BEGIN_DMA(count*vtx_size+1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) {
unsigned int j = idx[i];
DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
vtx_size);
}
DMA_COMMIT();
}
usr_idx += count;
n -= count;
prim |= BCI_CMD_DRAW_CONT;
}
return 0;
}
static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const drm_savage_cmd_header_t __user *data,
unsigned int nbox,
const drm_clip_rect_t __user *usr_boxes)
{
unsigned int flags = cmd_header->clear0.flags, mask, value;
unsigned int clear_cmd;
unsigned int i, nbufs;
DMA_LOCALS;
if (nbox == 0)
return 0;
DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data)
->clear1.mask);
DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data)
->clear1.value);
clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
	BCI_CMD_SET_ROP(clear_cmd, 0xCC);
nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
((flags & SAVAGE_BACK) ? 1 : 0) +
((flags & SAVAGE_DEPTH) ? 1 : 0);
if (nbufs == 0)
return 0;
if (mask != 0xffffffff) {
/* set mask */
BEGIN_DMA(2);
DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
DMA_WRITE(mask);
DMA_COMMIT();
}
for (i = 0; i < nbox; ++i) {
drm_clip_rect_t box;
unsigned int x, y, w, h;
unsigned int buf;
DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
x = box.x1, y = box.y1;
w = box.x2 - box.x1;
h = box.y2 - box.y1;
BEGIN_DMA(nbufs*6);
for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
if (!(flags & buf))
continue;
DMA_WRITE(clear_cmd);
switch(buf) {
case SAVAGE_FRONT:
DMA_WRITE(dev_priv->front_offset);
DMA_WRITE(dev_priv->front_bd);
break;
case SAVAGE_BACK:
DMA_WRITE(dev_priv->back_offset);
DMA_WRITE(dev_priv->back_bd);
break;
case SAVAGE_DEPTH:
DMA_WRITE(dev_priv->depth_offset);
DMA_WRITE(dev_priv->depth_bd);
break;
}
DMA_WRITE(value);
DMA_WRITE(BCI_X_Y(x, y));
DMA_WRITE(BCI_W_H(w, h));
}
DMA_COMMIT();
}
if (mask != 0xffffffff) {
/* reset mask */
BEGIN_DMA(2);
DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
DMA_WRITE(0xffffffff);
DMA_COMMIT();
}
return 0;
}
static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
unsigned int nbox,
const drm_clip_rect_t __user *usr_boxes)
{
unsigned int swap_cmd;
unsigned int i;
DMA_LOCALS;
if (nbox == 0)
return 0;
swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
	BCI_CMD_SET_ROP(swap_cmd, 0xCC);
for (i = 0; i < nbox; ++i) {
drm_clip_rect_t box;
DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
BEGIN_DMA(6);
DMA_WRITE(swap_cmd);
DMA_WRITE(dev_priv->back_offset);
DMA_WRITE(dev_priv->back_bd);
DMA_WRITE(BCI_X_Y(box.x1, box.y1));
DMA_WRITE(BCI_X_Y(box.x1, box.y1));
DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
DMA_COMMIT();
}
return 0;
}
static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t __user *start,
const drm_savage_cmd_header_t __user *end,
const drm_buf_t *dmabuf,
const unsigned int __user *usr_vtxbuf,
unsigned int vb_size, unsigned int vb_stride,
unsigned int nbox,
const drm_clip_rect_t __user *usr_boxes)
{
unsigned int i, j;
int ret;
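	/* The command stream lives in user memory and is re-read for
	 * every clip rectangle; the dispatch functions therefore
	 * revalidate anything safety-critical, e.g. vertex indices. */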
for (i = 0; i < nbox; ++i) {
drm_clip_rect_t box;
const drm_savage_cmd_header_t __user *usr_cmdbuf;
DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
dev_priv->emit_clip_rect(dev_priv, &box);
usr_cmdbuf = start;
while (usr_cmdbuf < end) {
drm_savage_cmd_header_t cmd_header;
DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
sizeof(cmd_header));
usr_cmdbuf++;
switch (cmd_header.cmd.cmd) {
case SAVAGE_CMD_DMA_PRIM:
ret = savage_dispatch_dma_prim(
dev_priv, &cmd_header, dmabuf);
break;
case SAVAGE_CMD_VB_PRIM:
ret = savage_dispatch_vb_prim(
dev_priv, &cmd_header,
(const uint32_t __user *)usr_vtxbuf,
vb_size, vb_stride);
break;
case SAVAGE_CMD_DMA_IDX:
j = (cmd_header.idx.count + 3) / 4;
				/* j was checked in savage_bci_cmdbuf */
ret = savage_dispatch_dma_idx(
dev_priv, &cmd_header,
(const uint16_t __user *)usr_cmdbuf,
dmabuf);
usr_cmdbuf += j;
break;
case SAVAGE_CMD_VB_IDX:
j = (cmd_header.idx.count + 3) / 4;
				/* j was checked in savage_bci_cmdbuf */
ret = savage_dispatch_vb_idx(
dev_priv, &cmd_header,
(const uint16_t __user *)usr_cmdbuf,
(const uint32_t __user *)usr_vtxbuf,
vb_size, vb_stride);
usr_cmdbuf += j;
break;
default:
/* What's the best return code? EFAULT? */
DRM_ERROR("IMPLEMENTATION ERROR: "
"non-drawing-command %d\n",
cmd_header.cmd.cmd);
return DRM_ERR(EINVAL);
}
if (ret != 0)
return ret;
}
}
return 0;
}
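
/* Command buffer ioctl: verify that the command stream, vertex buffer
 * and clip rectangles supplied by user space are readable and in range,
 * then dispatch state, clear, swap and drawing commands. */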
int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *dmabuf;
drm_savage_cmdbuf_t cmdbuf;
drm_savage_cmd_header_t __user *usr_cmdbuf;
drm_savage_cmd_header_t __user *first_draw_cmd;
unsigned int __user *usr_vtxbuf;
drm_clip_rect_t __user *usr_boxes;
unsigned int i, j;
int ret = 0;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
sizeof(cmdbuf));
if (dma && dma->buflist) {
		if (cmdbuf.dma_idx >= dma->buf_count) {
DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
cmdbuf.dma_idx, dma->buf_count-1);
return DRM_ERR(EINVAL);
}
dmabuf = dma->buflist[cmdbuf.dma_idx];
} else {
dmabuf = NULL;
}
usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
(cmdbuf.vb_size && DRM_VERIFYAREA_READ(
usr_vtxbuf, cmdbuf.vb_size)) ||
(cmdbuf.nbox && DRM_VERIFYAREA_READ(
usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
return DRM_ERR(EFAULT);
/* Make sure writes to DMA buffers are finished before sending
* DMA commands to the graphics hardware. */
DRM_MEMORYBARRIER();
	/* Commands come from user space, and we don't know whether the
	 * X server has emitted wait commands, so assume the worst. */
dev_priv->waiting = 1;
i = 0;
first_draw_cmd = NULL;
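	/* i counts 64-bit command words consumed so far; first_draw_cmd
	 * marks the start of the current run of drawing commands. */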
while (i < cmdbuf.size) {
drm_savage_cmd_header_t cmd_header;
DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
sizeof(cmd_header));
usr_cmdbuf++;
i++;
/* Group drawing commands with same state to minimize
* iterations over clip rects. */
j = 0;
switch (cmd_header.cmd.cmd) {
case SAVAGE_CMD_DMA_IDX:
case SAVAGE_CMD_VB_IDX:
j = (cmd_header.idx.count + 3) / 4;
if (i + j > cmdbuf.size) {
DRM_ERROR("indexed drawing command extends "
"beyond end of command buffer\n");
DMA_FLUSH();
return DRM_ERR(EINVAL);
}
/* fall through */
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
first_draw_cmd = usr_cmdbuf-1;
usr_cmdbuf += j;
i += j;
break;
default:
if (first_draw_cmd) {
				ret = savage_dispatch_draw(
dev_priv, first_draw_cmd, usr_cmdbuf-1,
dmabuf, usr_vtxbuf, cmdbuf.vb_size,
cmdbuf.vb_stride,
cmdbuf.nbox, usr_boxes);
if (ret != 0)
return ret;
first_draw_cmd = NULL;
}
}
if (first_draw_cmd)
continue;
switch (cmd_header.cmd.cmd) {
case SAVAGE_CMD_STATE:
j = (cmd_header.state.count + 1) / 2;
if (i + j > cmdbuf.size) {
DRM_ERROR("command SAVAGE_CMD_STATE extends "
"beyond end of command buffer\n");
DMA_FLUSH();
return DRM_ERR(EINVAL);
}
ret = savage_dispatch_state(
dev_priv, &cmd_header,
(uint32_t __user *)usr_cmdbuf);
usr_cmdbuf += j;
i += j;
break;
case SAVAGE_CMD_CLEAR:
if (i + 1 > cmdbuf.size) {
DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
"beyond end of command buffer\n");
DMA_FLUSH();
return DRM_ERR(EINVAL);
}
ret = savage_dispatch_clear(dev_priv, &cmd_header,
usr_cmdbuf,
cmdbuf.nbox, usr_boxes);
usr_cmdbuf++;
i++;
break;
case SAVAGE_CMD_SWAP:
ret = savage_dispatch_swap(dev_priv,
cmdbuf.nbox, usr_boxes);
break;
default:
DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
DMA_FLUSH();
return DRM_ERR(EINVAL);
}
if (ret != 0) {
DMA_FLUSH();
return ret;
}
}
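	/* Dispatch any drawing commands still pending at the end of
	 * the command stream. */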
if (first_draw_cmd) {
		ret = savage_dispatch_draw(
dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
cmdbuf.nbox, usr_boxes);
if (ret != 0) {
DMA_FLUSH();
return ret;
}
}
DMA_FLUSH();
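	/* A discarded buffer is stamped with a new event and returned to
	 * the freelist; it will only be reused once the 3D engine has
	 * processed everything queued before that event. */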
if (dmabuf && cmdbuf.discard) {
drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
uint16_t event;
event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
savage_freelist_put(dev, dmabuf);
}
return 0;
}