Commit 43d332b0 authored by Linus Torvalds

Merge with DRI CVS tree.

 - add support for sending a signal on vblank
 - remove unhelpful AGP chipset strings
 - update radeon texture cache handling
parent c5fc26c9
@@ -346,17 +346,30 @@ typedef struct drm_irq_busid {
 } drm_irq_busid_t;
 
 typedef enum {
         _DRM_VBLANK_ABSOLUTE = 0x0,     /* Wait for specific vblank sequence number */
-        _DRM_VBLANK_RELATIVE = 0x1      /* Wait for given number of vblanks */
+        _DRM_VBLANK_RELATIVE = 0x1,     /* Wait for given number of vblanks */
+        _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
 } drm_vblank_seq_type_t;
 
-typedef struct drm_radeon_vbl_wait {
+#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
+
+struct drm_wait_vblank_request {
+        drm_vblank_seq_type_t type;
+        unsigned int sequence;
+        unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
         drm_vblank_seq_type_t type;
         unsigned int sequence;
         long tval_sec;
         long tval_usec;
-} drm_wait_vblank_t;
+};
+
+typedef union drm_wait_vblank {
+        struct drm_wait_vblank_request request;
+        struct drm_wait_vblank_reply reply;
+} drm_wait_vblank_t;
 
 typedef struct drm_agp_mode {
         unsigned long mode;
......
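Note: the new ioctl interface is a request/reply union — userspace fills in drm_wait_vblank.request, and the kernel overwrites the same storage with drm_wait_vblank.reply. A minimal userspace sketch of the signal-driven path follows; the DRM_IOCTL_WAIT_VBLANK request name and the choice of SIGUSR1 are illustrative assumptions, not part of this diff.

#include <signal.h>
#include <sys/ioctl.h>
/* Sketch only: assumes the drm.h definitions above are in scope, fd is
 * an open DRM device node, and DRM_IOCTL_WAIT_VBLANK is the ioctl
 * routed to DRM(wait_vblank).
 */
static int request_vblank_signal( int fd, unsigned int frames_ahead )
{
        drm_wait_vblank_t vbl;

        vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_SIGNAL;
        vbl.request.sequence = frames_ahead;    /* relative to current count */
        vbl.request.signal = SIGUSR1;           /* signo; illustrative choice */

        /* Returns immediately; reply.sequence holds the current vblank
         * count, and the signal is delivered later by vbl_send_signals()
         * with si_code set to the vblank count at delivery time.
         */
        return ioctl( fd, DRM_IOCTL_WAIT_VBLANK, &vbl );
}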
@@ -487,7 +487,6 @@ typedef struct drm_agp_mem {
 typedef struct drm_agp_head {
         agp_kern_info agp_info;
-        const char *chipset;
         drm_agp_mem_t *memory;
         unsigned long mode;
         int enabled;
@@ -517,6 +516,17 @@ typedef struct drm_map_list {
         drm_map_t *map;
 } drm_map_list_t;
 
+#if __HAVE_VBL_IRQ
+typedef struct drm_vbl_sig {
+        struct list_head head;
+        unsigned int sequence;
+        struct siginfo info;
+        struct task_struct *task;
+} drm_vbl_sig_t;
+#endif
+
 typedef struct drm_device {
         const char *name;       /* Simple driver name */
         char *unique;           /* Unique identifier: e.g., busid */
@@ -579,6 +589,8 @@ typedef struct drm_device {
 #if __HAVE_VBL_IRQ
         wait_queue_head_t vbl_queue;
         atomic_t vbl_received;
+        spinlock_t vbl_lock;
+        drm_vbl_sig_t vbl_sigs;
 #endif
         cycles_t ctx_start;
         cycles_t lck_start;
@@ -819,6 +831,7 @@ extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
 extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
                             unsigned int cmd, unsigned long arg);
 extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
+extern void DRM(vbl_send_signals)( drm_device_t *dev );
 #endif
 #if __HAVE_DMA_IRQ_BH
 extern void DRM(dma_immediate_bh)( void *dev );
......
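Worth noting about drm_vbl_sig_t above: `struct list_head head` is deliberately its first member, and the code added below (in DRM(wait_vblank) and DRM(vbl_send_signals)) casts directly between `struct list_head *` and `drm_vbl_sig_t *` instead of using list_entry(). That is only valid while the head stays at offset zero; a build-time check along these lines would make the layout assumption explicit (a sketch, not part of this diff):

#include <stddef.h>
/* Compile-time layout check (sketch): the negative array size triggers
 * a compile error if the list head ever moves off offset 0.
 */
typedef char drm_vbl_sig_layout_ok
        [ offsetof(drm_vbl_sig_t, head) == 0 ? 1 : -1 ];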
@@ -260,64 +260,6 @@ drm_agp_head_t *DRM(agp_init)(void)
                         return NULL;
                 }
                 head->memory = NULL;
-                switch (head->agp_info.chipset) {
-                case INTEL_GENERIC:     head->chipset = "Intel"; break;
-                case INTEL_LX:          head->chipset = "Intel 440LX"; break;
-                case INTEL_BX:          head->chipset = "Intel 440BX"; break;
-                case INTEL_GX:          head->chipset = "Intel 440GX"; break;
-                case INTEL_I810:        head->chipset = "Intel i810"; break;
-                case INTEL_I815:        head->chipset = "Intel i815"; break;
-#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
-                case INTEL_I820:        head->chipset = "Intel i820"; break;
-#endif
-                case INTEL_I840:        head->chipset = "Intel i840"; break;
-#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
-                case INTEL_I845:        head->chipset = "Intel i845"; break;
-#endif
-                case INTEL_I850:        head->chipset = "Intel i850"; break;
-                case INTEL_460GX:       head->chipset = "Intel 460GX"; break;
-                case VIA_GENERIC:       head->chipset = "VIA"; break;
-                case VIA_VP3:           head->chipset = "VIA VP3"; break;
-                case VIA_MVP3:          head->chipset = "VIA MVP3"; break;
-                case VIA_MVP4:          head->chipset = "VIA MVP4"; break;
-                case VIA_APOLLO_KX133:  head->chipset = "VIA Apollo KX133";
-                        break;
-                case VIA_APOLLO_KT133:  head->chipset = "VIA Apollo KT133";
-                        break;
-                case VIA_APOLLO_PRO:    head->chipset = "VIA Apollo Pro";
-                        break;
-                case SIS_GENERIC:       head->chipset = "SiS"; break;
-                case AMD_GENERIC:       head->chipset = "AMD"; break;
-                case AMD_IRONGATE:      head->chipset = "AMD Irongate"; break;
-                case ALI_GENERIC:       head->chipset = "ALi"; break;
-                case ALI_M1541:         head->chipset = "ALi M1541"; break;
-#if LINUX_VERSION_CODE >= 0x020402
-                case ALI_M1621:         head->chipset = "ALi M1621"; break;
-                case ALI_M1631:         head->chipset = "ALi M1631"; break;
-                case ALI_M1632:         head->chipset = "ALi M1632"; break;
-                case ALI_M1641:         head->chipset = "ALi M1641"; break;
-                case ALI_M1644:         head->chipset = "ALi M1644"; break;
-                case ALI_M1647:         head->chipset = "ALi M1647"; break;
-                case ALI_M1651:         head->chipset = "ALi M1651"; break;
-#endif
-#if LINUX_VERSION_CODE >= 0x020406
-                case SVWRKS_HE:         head->chipset = "Serverworks HE";
-                        break;
-                case SVWRKS_LE:         head->chipset = "Serverworks LE";
-                        break;
-                case SVWRKS_GENERIC:    head->chipset = "Serverworks Generic";
-                        break;
-#endif
-                case HP_ZX1:            head->chipset = "HP ZX1"; break;
-                default:                head->chipset = "Unknown"; break;
-                }
 #if LINUX_VERSION_CODE <= 0x020408
                 head->cant_use_aperture = 0;
                 head->page_mask = ~(0xfff);
@@ -325,13 +267,12 @@ drm_agp_head_t *DRM(agp_init)(void)
                 head->cant_use_aperture = head->agp_info.cant_use_aperture;
                 head->page_mask = head->agp_info.page_mask;
 #endif
-                DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
-                         head->agp_info.version.major,
-                         head->agp_info.version.minor,
-                         head->chipset,
-                         head->agp_info.aper_base,
-                         head->agp_info.aper_size);
+                DRM_DEBUG("AGP %d.%d, aperture @ 0x%08lx %ZuMB\n",
+                          head->agp_info.version.major,
+                          head->agp_info.version.minor,
+                          head->agp_info.aper_base,
+                          head->agp_info.aper_size);
         }
         return head;
 }
......
@@ -537,6 +537,10 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
 
 #if __HAVE_VBL_IRQ
         init_waitqueue_head(&dev->vbl_queue);
+
+        spin_lock_init( &dev->vbl_lock );
+
+        INIT_LIST_HEAD( &dev->vbl_sigs.head );
 #endif
 
                                 /* Before installing handler */
@@ -607,7 +611,8 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
         drm_device_t *dev = priv->dev;
         drm_wait_vblank_t vblwait;
         struct timeval now;
-        int ret;
+        int ret = 0;
+        unsigned int flags;
 
         if (!dev->irq)
                 return -EINVAL;
@@ -615,15 +620,45 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
         DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
                                   sizeof(vblwait) );
 
-        if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
-                vblwait.sequence += atomic_read( &dev->vbl_received );
+        switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
+        case _DRM_VBLANK_RELATIVE:
+                vblwait.request.sequence += atomic_read( &dev->vbl_received );
+        case _DRM_VBLANK_ABSOLUTE:
+                break;
+        default:
+                return -EINVAL;
         }
 
-        ret = DRM(vblank_wait)( dev, &vblwait.sequence );
+        flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
 
-        do_gettimeofday( &now );
-        vblwait.tval_sec = now.tv_sec;
-        vblwait.tval_usec = now.tv_usec;
+        if ( flags & _DRM_VBLANK_SIGNAL ) {
+                unsigned long irqflags;
+                drm_vbl_sig_t *vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) );
+
+                if ( !vbl_sig )
+                        return -ENOMEM;
+
+                memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
+
+                vbl_sig->sequence = vblwait.request.sequence;
+                vbl_sig->info.si_signo = vblwait.request.signal;
+                vbl_sig->task = current;
+
+                vblwait.reply.sequence = atomic_read( &dev->vbl_received );
+
+                /* Hook signal entry into list */
+                spin_lock_irqsave( &dev->vbl_lock, irqflags );
+                list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
+                spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+        } else {
+                ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
+
+                do_gettimeofday( &now );
+                vblwait.reply.tval_sec = now.tv_sec;
+                vblwait.reply.tval_usec = now.tv_usec;
+        }
 
         DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
                                 sizeof(vblwait) );
@@ -631,6 +666,33 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
         return ret;
 }
 
+void DRM(vbl_send_signals)( drm_device_t *dev )
+{
+        struct list_head *entry, *tmp;
+        drm_vbl_sig_t *vbl_sig;
+        unsigned int vbl_seq = atomic_read( &dev->vbl_received );
+        unsigned long flags;
+
+        spin_lock_irqsave( &dev->vbl_lock, flags );
+
+        list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) {
+                vbl_sig = (drm_vbl_sig_t *) entry;
+
+                if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
+                        vbl_sig->info.si_code = atomic_read( &dev->vbl_received );
+                        send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
+
+                        list_del( entry );
+
+                        DRM_FREE( entry );
+                }
+        }
+
+        spin_unlock_irqrestore( &dev->vbl_lock, flags );
+}
+
 #endif /* __HAVE_VBL_IRQ */
 
 #else
......
@@ -408,7 +408,7 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
         if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                 vma->vm_flags &= VM_MAYWRITE;
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
                 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
 #else
                 /* Ye gads this is ugly.  With more thought
@@ -435,7 +435,7 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
         case _DRM_FRAME_BUFFER:
         case _DRM_REGISTERS:
                 if (VM_OFFSET(vma) >= __pa(high_memory)) {
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
                         if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
......
@@ -50,6 +50,7 @@ void mga_dma_service( DRM_IRQ_ARGS )
                 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
                 atomic_inc(&dev->vbl_received);
                 DRM_WAKEUP(&dev->vbl_queue);
+                DRM(vbl_send_signals)( dev );
         }
 }
@@ -64,7 +65,7 @@ int mga_vblank_wait(drm_device_t *dev, unsigned int *sequence)
          */
         DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
                      ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
-                          + ~*sequence + 1 ) <= (1<<23) ) );
+                          - *sequence ) <= (1<<23) ) );
         *sequence = cur_vblank;
......
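The changed DRM_WAIT_ON condition here (and in the identical r128 and radeon hunks below) is purely cosmetic: in two's-complement arithmetic ~x + 1 == -x, so `cur_vblank + ~*sequence + 1` and `cur_vblank - *sequence` compute the same unsigned difference. The `<= (1<<23)` test then treats a difference in the lower half-window as "target reached", which stays correct across counter wraparound. A standalone sketch of the idiom:

#include <assert.h>
/* Wrap-safe "counter has reached target" test, as used in the
 * DRM_WAIT_ON conditions and in DRM(vbl_send_signals).
 */
static int vblank_reached( unsigned int cur, unsigned int target )
{
        /* Unsigned subtraction wraps modulo 2^32, so a small difference
         * means cur is at or recently past target, even across a wrap.
         */
        return ( cur - target ) <= (1 << 23);
}

int main( void )
{
        assert(  vblank_reached( 100, 100 ) );          /* exactly reached */
        assert(  vblank_reached( 105, 100 ) );          /* recently passed */
        assert( !vblank_reached( 100, 105 ) );          /* still in the future */
        assert(  vblank_reached( 3, 0xfffffffeU ) );    /* across the wrap */
        assert( ( 100u + ~100u + 1 ) == 0u );           /* ~x + 1 == -x */
        return 0;
}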
@@ -71,7 +71,7 @@
         [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)]    = { r128_cce_depth,    1, 0 }, \
         [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)]  = { r128_cce_stipple,  1, 0 }, \
         [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, \
-        [DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 1 },
+        [DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 0 },
 
 /* Driver customization:
  */
......
@@ -50,6 +50,7 @@ void r128_dma_service( DRM_IRQ_ARGS )
                 R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
                 atomic_inc(&dev->vbl_received);
                 DRM_WAKEUP(&dev->vbl_queue);
+                DRM(vbl_send_signals)( dev );
         }
 }
@@ -64,7 +65,7 @@ int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
          */
         DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
                      ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
-                          + ~*sequence + 1 ) <= (1<<23) ) );
+                          - *sequence ) <= (1<<23) ) );
         *sequence = cur_vblank;
......
@@ -1497,7 +1497,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
                 }
         }
 
-        DRM_ERROR( "returning NULL!\n" );
+        DRM_DEBUG( "returning NULL!\n" );
         return NULL;
 }
 
 #if 0
......
@@ -70,13 +70,12 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
                 DRM_WAKEUP( &dev_priv->swi_queue );
         }
 
-#if __HAVE_VBL_IRQ
         /* VBLANK interrupt */
         if (stat & RADEON_CRTC_VBLANK_STAT) {
                 atomic_inc(&dev->vbl_received);
                 DRM_WAKEUP(&dev->vbl_queue);
+                DRM(vbl_send_signals)( dev );
         }
-#endif
 
         /* Acknowledge all the bits in GEN_INT_STATUS -- seem to get
          * more than we asked for...
@@ -138,7 +137,6 @@ int radeon_emit_and_wait_irq(drm_device_t *dev)
 }
 
-#if __HAVE_VBL_IRQ
 int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
 {
         drm_radeon_private_t *dev_priv =
@@ -161,13 +159,12 @@ int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
          */
         DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
                      ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
-                          + ~*sequence + 1 ) <= (1<<23) ) );
+                          - *sequence ) <= (1<<23) ) );
         *sequence = cur_vblank;
 
         return ret;
 }
-#endif
 
 /* Needs the lock as it touches the ring.
......
@@ -1074,19 +1074,30 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
         const u8 *data;
         int size, dwords, tex_width, blit_width;
         u32 y, height;
-        int ret = 0, i;
+        int i;
         RING_LOCALS;
 
         dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
 
-        /* FIXME: Be smarter about this...
+        /* Flush the pixel cache.  This ensures no pixel data gets mixed
+         * up with the texture data from the host data blit, otherwise
+         * part of the texture image may be corrupted.
          */
-        buf = radeon_freelist_get( dev );
-        if ( !buf ) return DRM_ERR(EAGAIN);
+        BEGIN_RING( 4 );
+        RADEON_FLUSH_CACHE();
+        RADEON_WAIT_UNTIL_IDLE();
+        ADVANCE_RING();
 
-        DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
-                   tex->offset >> 10, tex->pitch, tex->format,
-                   image->x, image->y, image->width, image->height );
+#ifdef __BIG_ENDIAN
+        /* The Mesa texture functions provide the data in little endian as the
+         * chip wants it, but we need to compensate for the fact that the CP
+         * ring gets byte-swapped
+         */
+        BEGIN_RING( 2 );
+        OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
+        ADVANCE_RING();
+#endif
 
         /* The compiler won't optimize away a division by a variable,
          * even if the only legal values are powers of two.  Thus, we'll
@@ -1120,127 +1131,113 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
                 return DRM_ERR(EINVAL);
         }
 
-        DRM_DEBUG( " tex=%dx%d blit=%d\n",
-                   tex_width, tex->height, blit_width );
-
-        /* Flush the pixel cache.  This ensures no pixel data gets mixed
-         * up with the texture data from the host data blit, otherwise
-         * part of the texture image may be corrupted.
-         */
-        BEGIN_RING( 4 );
-        RADEON_FLUSH_CACHE();
-        RADEON_WAIT_UNTIL_IDLE();
-        ADVANCE_RING();
-
-#ifdef __BIG_ENDIAN
-        /* The Mesa texture functions provide the data in little endian as the
-         * chip wants it, but we need to compensate for the fact that the CP
-         * ring gets byte-swapped
-         */
-        BEGIN_RING( 2 );
-        OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
-        ADVANCE_RING();
-#endif
-
-        /* Make a copy of the parameters in case we have to update them
-         * for a multi-pass texture blit.
-         */
-        y = image->y;
-        height = image->height;
-        data = (const u8 *)image->data;
-
-        size = height * blit_width;
-
-        if ( size > RADEON_MAX_TEXTURE_SIZE ) {
-                /* Texture image is too large, do a multipass upload */
-                ret = DRM_ERR(EAGAIN);
-
-                /* Adjust the blit size to fit the indirect buffer */
-                height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+        DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
+
+        do {
+                DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+                           tex->offset >> 10, tex->pitch, tex->format,
+                           image->x, image->y, image->width, image->height );
+
+                /* Make a copy of the parameters in case we have to
+                 * update them for a multi-pass texture blit.
+                 */
+                y = image->y;
+                height = image->height;
+                data = (const u8 *)image->data;
+
                 size = height * blit_width;
 
+                if ( size > RADEON_MAX_TEXTURE_SIZE ) {
+                        height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+                        size = height * blit_width;
+                } else if ( size < 4 && size > 0 ) {
+                        size = 4;
+                } else if ( size == 0 ) {
+                        return 0;
+                }
+
                 /* Update the input parameters for next time */
                 image->y += height;
                 image->height -= height;
-                image->data = (const char *)image->data + size;
-
-                if ( DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ) ) {
-                        DRM_ERROR( "EFAULT on tex->image\n" );
-                        return DRM_ERR(EFAULT);
+                image->data += size;
+
+                buf = radeon_freelist_get( dev );
+                if ( 0 && !buf ) {
+                        radeon_do_cp_idle( dev_priv );
+                        buf = radeon_freelist_get( dev );
+                }
+                if ( !buf ) {
+                        DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+                        DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
+                        return DRM_ERR(EAGAIN);
                 }
-        } else if ( size < 4 && size > 0 ) {
-                size = 4;
-        }
-
-        dwords = size / 4;
 
-        /* Dispatch the indirect buffer.
-         */
-        buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
-
-        buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
-        buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
-                     RADEON_GMC_BRUSH_NONE |
-                     (format << 8) |
-                     RADEON_GMC_SRC_DATATYPE_COLOR |
-                     RADEON_ROP3_S |
-                     RADEON_DP_SRC_SOURCE_HOST_DATA |
-                     RADEON_GMC_CLR_CMP_CNTL_DIS |
-                     RADEON_GMC_WR_MSK_DIS);
-
-        buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
-        buffer[3] = 0xffffffff;
-        buffer[4] = 0xffffffff;
-        buffer[5] = (y << 16) | image->x;
-        buffer[6] = (height << 16) | image->width;
-        buffer[7] = dwords;
-
-        buffer += 8;
-
-        if ( tex_width >= 32 ) {
-                /* Texture image width is larger than the minimum, so we
-                 * can upload it directly.
-                 */
-                if ( DRM_COPY_FROM_USER( buffer, data, dwords * sizeof(u32) ) ) {
-                        DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
-                        return DRM_ERR(EFAULT);
-                }
-        } else {
-                /* Texture image width is less than the minimum, so we
-                 * need to pad out each image scanline to the minimum
-                 * width.
+                /* Dispatch the indirect buffer.
                  */
-                for ( i = 0 ; i < tex->height ; i++ ) {
-                        if ( DRM_COPY_FROM_USER( buffer, data, tex_width ) ) {
-                                DRM_ERROR( "EFAULT on pad, %d bytes\n",
-                                           tex_width );
+                buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
+                dwords = size / 4;
+                buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
+                buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+                             RADEON_GMC_BRUSH_NONE |
+                             (format << 8) |
+                             RADEON_GMC_SRC_DATATYPE_COLOR |
+                             RADEON_ROP3_S |
+                             RADEON_DP_SRC_SOURCE_HOST_DATA |
+                             RADEON_GMC_CLR_CMP_CNTL_DIS |
+                             RADEON_GMC_WR_MSK_DIS);
+
+                buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
+                buffer[3] = 0xffffffff;
+                buffer[4] = 0xffffffff;
+                buffer[5] = (y << 16) | image->x;
+                buffer[6] = (height << 16) | image->width;
+                buffer[7] = dwords;
+
+                buffer += 8;
+
+                if ( tex_width >= 32 ) {
+                        /* Texture image width is larger than the minimum, so we
+                         * can upload it directly.
+                         */
+                        if ( DRM_COPY_FROM_USER( buffer, data,
+                                                 dwords * sizeof(u32) ) ) {
+                                DRM_ERROR( "EFAULT on data, %d dwords\n",
+                                           dwords );
                                 return DRM_ERR(EFAULT);
                         }
-                        buffer += 8;
-                        data += tex_width;
+                } else {
+                        /* Texture image width is less than the minimum, so we
+                         * need to pad out each image scanline to the minimum
+                         * width.
+                         */
+                        for ( i = 0 ; i < tex->height ; i++ ) {
+                                if ( DRM_COPY_FROM_USER( buffer, data,
+                                                         tex_width ) ) {
+                                        DRM_ERROR( "EFAULT on pad, %d bytes\n",
+                                                   tex_width );
+                                        return DRM_ERR(EFAULT);
+                                }
+                                buffer += 8;
+                                data += tex_width;
+                        }
                 }
-        }
 
-        buf->pid = DRM_CURRENTPID;
-        buf->used = (dwords + 8) * sizeof(u32);
+                buf->pid = DRM_CURRENTPID;
+                buf->used = (dwords + 8) * sizeof(u32);
 
-        radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
-        radeon_cp_discard_buffer( dev, buf );
+                radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
+                radeon_cp_discard_buffer( dev, buf );
+
+        } while (image->height > 0);
 
         /* Flush the pixel cache after the blit completes.  This ensures
          * the texture data is written out to memory before rendering
          * continues.
          */
         BEGIN_RING( 4 );
         RADEON_FLUSH_CACHE();
         RADEON_WAIT_UNTIL_2D_IDLE();
         ADVANCE_RING();
 
-        return ret;
+        return 0;
 }
......
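The restructured radeon_cp_dispatch_texture() above replaces the old single-pass scheme, which bounced oversized uploads back to userspace with EAGAIN, with an in-kernel do/while loop: each pass clamps the upload to RADEON_MAX_TEXTURE_SIZE bytes of blit data, advances image->y/height/data, and dispatches one indirect buffer, until image->height reaches zero. The chunking arithmetic in isolation (a sketch; MAX_BYTES is an illustrative stand-in for RADEON_MAX_TEXTURE_SIZE, and blit_width is assumed nonzero and no larger than MAX_BYTES, as in the driver):

enum { MAX_BYTES = 1 << 16 };   /* stand-in for RADEON_MAX_TEXTURE_SIZE */

/* How many indirect-buffer passes the do/while loop above would make
 * for a given image height and padded scanline width.
 */
static unsigned int upload_passes( unsigned int height, unsigned int blit_width )
{
        unsigned int passes = 0;

        while ( height > 0 ) {
                unsigned int chunk = height;

                if ( chunk * blit_width > MAX_BYTES )
                        chunk = MAX_BYTES / blit_width; /* clamp one pass */

                height -= chunk;        /* mirrors image->height -= height */
                passes++;
        }
        return passes;
}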