Commit 4b5d2381 authored by Dave Airlie

drm: fix race condition in radeon driver

Close a race which could allow for privilege escalation by users with DRI
privileges on Radeon hardware.  Essentially, a malicious program could submit
a packet containing an offset (possibly in main memory) to be rendered from/to,
while a separate thread switched that offset in userspace rapidly between a
valid value and an invalid one.  radeon_check_and_fixup_offset() would pull the
offset in from user space, check it, and spit it back out to user space to be
copied in later by the emit code.  It would sometimes catch the bad value, but
sometimes the malicious program could modify it after the check and get an
invalid offset rendered from/to.
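
The window being exploited is a classic check-then-use (TOCTOU) race. As a rough user-space illustration only, with invented names and none of the driver's actual code, the vulnerable shape looks like this (POSIX threads, C11 atomics; compile with -pthread, and it typically reports a lost race almost immediately):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* A value living in memory the submitter also controls (illustrative only). */
static _Atomic unsigned int shared_offset = 0x1000;	/* 0x1000 == "valid" */

static int offset_is_valid(unsigned int off)
{
	return off == 0x1000;
}

/* Attacker thread: flip the value between valid and invalid as fast as possible. */
static void *flipper(void *arg)
{
	(void)arg;
	for (;;) {
		atomic_store(&shared_offset, 0xdead0000u);
		atomic_store(&shared_offset, 0x1000u);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, flipper, NULL);

	for (unsigned long i = 0; ; i++) {
		/* Check one read of the shared value... */
		if (!offset_is_valid(atomic_load(&shared_offset)))
			continue;
		/* ...then act on a second read; by now it may have changed. */
		unsigned int used = atomic_load(&shared_offset);
		if (!offset_is_valid(used)) {
			printf("lost the race after %lu tries: used 0x%x\n", i, used);
			return 0;
		}
	}
}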

Fix this by allocating a temporary buffer and copying the data in all at once.
While here, make the cliprects stuff not do the VERIFYAREA_READ and
COPY_FROM_USER_UNCHECKED gymnastics, avoiding a lock order reversal on FreeBSD.
Performance impact is negligible -- ranging from no difference on r200 to a ~1%
improvement on rv200 in quake3 tests (P4 1GHz, demofour at 1024x768, n=4 or 5).

From: Eric Anholt <anholt@freebsd.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 149d5cef
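
Before the hunks, a minimal sketch of the pattern the patch adopts: snapshot the whole submission into memory the submitter cannot touch, then validate and emit only from that private copy. The struct and helper names below are made up for illustration, and plain malloc()/memcpy() stand in for the kernel-side drm_alloc() and checked DRM_COPY_FROM_USER() that appear in the diff:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the ioctl argument: a user-supplied blob of
 * command words plus its size. */
struct cmd_buffer {
	const void *user_buf;
	size_t bufsz;
};

/* Placeholder for "check offsets and feed the ring"; it only ever reads
 * from the kernel-side copy handed to it. */
static int validate_and_emit(const unsigned int *words, size_t count)
{
	(void)words;
	(void)count;
	return 0;
}

static int process_cmdbuf(const struct cmd_buffer *cb)
{
	void *kbuf;
	int ret;

	if (cb->bufsz > 64 * 1024)	/* same sanity cap as the patch */
		return -EINVAL;
	if (cb->bufsz == 0)
		return 0;

	kbuf = malloc(cb->bufsz);	/* the driver uses drm_alloc() here */
	if (kbuf == NULL)
		return -ENOMEM;

	/* Copy exactly once; in the kernel this is the checked copy_from_user.
	 * From this point on, nothing the submitter does to its own mapping
	 * can change what gets checked or emitted. */
	memcpy(kbuf, cb->user_buf, cb->bufsz);

	ret = validate_and_emit(kbuf, cb->bufsz / sizeof(unsigned int));
	free(kbuf);
	return ret;
}

int main(void)
{
	unsigned int words[4] = { 0, 0, 0, 0 };
	struct cmd_buffer cb = { words, sizeof(words) };
	return process_cmdbuf(&cb);
}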
@@ -96,9 +96,6 @@ static __inline__ int mtrr_del (int reg, unsigned long base,
 	__copy_to_user(arg1, arg2, arg3)
 #define DRM_GET_USER_UNCHECKED(val, uaddr) \
 	__get_user(val, uaddr)
-#define DRM_PUT_USER_UNCHECKED(uaddr, val) \
-	__put_user(val, uaddr)
 #define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
@@ -1027,25 +1027,27 @@ do { \
 } while (0)
-#define OUT_RING_USER_TABLE( tab, sz ) do {	\
+#define OUT_RING_TABLE( tab, sz ) do {		\
 	int _size = (sz);			\
-	int __user *_tab = (tab);		\
+	int *_tab = (int *)(tab);		\
 						\
 	if (write + _size > mask) {		\
-		int i = (mask+1) - write;	\
-		if (DRM_COPY_FROM_USER_UNCHECKED( (int *)(ring+write),	\
-				      _tab, i*4 ))	\
-			return DRM_ERR(EFAULT);	\
+		int _i = (mask+1) - write;	\
+		_size -= _i;			\
+		while (_i > 0 ) {		\
+			*(int *)(ring + write) = *_tab++;	\
+			write++;		\
+			_i--;			\
+		}				\
 		write = 0;			\
-		_size -= i;			\
-		_tab += i;			\
+		_tab += _i;			\
 	}					\
 						\
-	if (_size && DRM_COPY_FROM_USER_UNCHECKED( (int *)(ring+write),	\
-			     _tab, _size*4 ))	\
-		return DRM_ERR(EFAULT);		\
-						\
-	write += _size;				\
+	while (_size > 0) {			\
+		*(ring + write) = *_tab++;	\
+		write++;			\
+		_size--;			\
+	}					\
 	write &= mask;				\
 } while (0)
@@ -93,21 +93,6 @@ static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_p
 	return 0;
 }
-static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
-							   drm_file_t *filp_priv,
-							   u32 __user *offset ) {
-	u32 off;
-	DRM_GET_USER_UNCHECKED( off, offset );
-	if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &off ) )
-		return DRM_ERR( EINVAL );
-	DRM_PUT_USER_UNCHECKED( offset, off );
-	return 0;
-}
 static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
 						       drm_file_t *filp_priv,
 						       int id,
@@ -115,7 +100,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
 	switch ( id ) {
 	case RADEON_EMIT_PP_MISC:
-		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
+		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
 						    &data[( RADEON_RB3D_DEPTHOFFSET
 							    - RADEON_PP_MISC ) / 4] ) ) {
 			DRM_ERROR( "Invalid depth buffer offset\n" );
@@ -124,7 +109,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
 		break;
 	case RADEON_EMIT_PP_CNTL:
-		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
+		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
 						    &data[( RADEON_RB3D_COLOROFFSET
 							    - RADEON_PP_CNTL ) / 4] ) ) {
 			DRM_ERROR( "Invalid colour buffer offset\n" );
@@ -138,7 +123,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
 	case R200_EMIT_PP_TXOFFSET_3:
 	case R200_EMIT_PP_TXOFFSET_4:
 	case R200_EMIT_PP_TXOFFSET_5:
-		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
+		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
 						    &data[0] ) ) {
 			DRM_ERROR( "Invalid R200 texture offset\n" );
 			return DRM_ERR( EINVAL );
@@ -148,7 +133,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
 	case RADEON_EMIT_PP_TXFILTER_0:
 	case RADEON_EMIT_PP_TXFILTER_1:
 	case RADEON_EMIT_PP_TXFILTER_2:
-		if ( radeon_check_and_fixup_offset_user( dev_priv, filp_priv,
+		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
 						    &data[( RADEON_PP_TXOFFSET_0
 							    - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
 			DRM_ERROR( "Invalid R100 texture offset\n" );
@@ -164,8 +149,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
 	case R200_EMIT_PP_CUBIC_OFFSETS_5: {
 		int i;
 		for ( i = 0; i < 5; i++ ) {
-			if ( radeon_check_and_fixup_offset_user( dev_priv,
-								 filp_priv,
+			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
 							    &data[i] ) ) {
 				DRM_ERROR( "Invalid R200 cubic texture offset\n" );
 				return DRM_ERR( EINVAL );
@@ -250,17 +234,11 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
 						      drm_file_t *filp_priv,
 						      drm_radeon_cmd_buffer_t *cmdbuf,
 						      unsigned int *cmdsz ) {
-	u32 tmp[4];
-	u32 __user *cmd = (u32 __user *)cmdbuf->buf;
-	if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
-		DRM_ERROR( "Failed to copy data from user space\n" );
-		return DRM_ERR( EFAULT );
-	}
+	u32 *cmd = (u32 *) cmdbuf->buf;
-	*cmdsz = 2 + ( ( tmp[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
+	*cmdsz = 2 + ( ( cmd[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );
-	if ( ( tmp[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
+	if ( ( cmd[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
 		DRM_ERROR( "Not a type 3 packet\n" );
 		return DRM_ERR( EINVAL );
 	}
@@ -271,32 +249,27 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
 	}
 	/* Check client state and fix it up if necessary */
-	if ( tmp[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
+	if ( cmd[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
 		u32 offset;
-		if ( tmp[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+		if ( cmd[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
 				| RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
-			offset = tmp[2] << 10;
+			offset = cmd[2] << 10;
 			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
 				DRM_ERROR( "Invalid first packet offset\n" );
 				return DRM_ERR( EINVAL );
 			}
-			tmp[2] = ( tmp[2] & 0xffc00000 ) | offset >> 10;
+			cmd[2] = ( cmd[2] & 0xffc00000 ) | offset >> 10;
 		}
-		if ( ( tmp[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
-		     ( tmp[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
-			offset = tmp[3] << 10;
+		if ( ( cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
+		     ( cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
+			offset = cmd[3] << 10;
 			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
 				DRM_ERROR( "Invalid second packet offset\n" );
 				return DRM_ERR( EINVAL );
 			}
-			tmp[3] = ( tmp[3] & 0xffc00000 ) | offset >> 10;
-		}
-		if ( DRM_COPY_TO_USER_UNCHECKED( cmd, tmp, sizeof( tmp ) ) ) {
-			DRM_ERROR( "Failed to copy data to user space\n" );
-			return DRM_ERR( EFAULT );
+			cmd[3] = ( cmd[3] & 0xffc00000 ) | offset >> 10;
 		}
 	}
@@ -2473,7 +2446,7 @@ static int radeon_emit_packets(
 {
 	int id = (int)header.packet.packet_id;
 	int sz, reg;
-	int __user *data = (int __user *)cmdbuf->buf;
+	int *data = (int *)cmdbuf->buf;
 	RING_LOCALS;
 	if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2494,7 +2467,7 @@ static int radeon_emit_packets(
 	BEGIN_RING(sz+1);
 	OUT_RING( CP_PACKET0( reg, (sz-1) ) );
-	OUT_RING_USER_TABLE( data, sz );
+	OUT_RING_TABLE( data, sz );
 	ADVANCE_RING();
 	cmdbuf->buf += sz * sizeof(int);
@@ -2508,7 +2481,6 @@ static __inline__ int radeon_emit_scalars(
 					     drm_radeon_cmd_buffer_t *cmdbuf )
 {
 	int sz = header.scalars.count;
-	int __user *data = (int __user *)cmdbuf->buf;
 	int start = header.scalars.offset;
 	int stride = header.scalars.stride;
 	RING_LOCALS;
@@ -2517,7 +2489,7 @@ static __inline__ int radeon_emit_scalars(
 	OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
 	OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
 	OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
-	OUT_RING_USER_TABLE( data, sz );
+	OUT_RING_TABLE( cmdbuf->buf, sz );
 	ADVANCE_RING();
 	cmdbuf->buf += sz * sizeof(int);
 	cmdbuf->bufsz -= sz * sizeof(int);
@@ -2532,7 +2504,6 @@ static __inline__ int radeon_emit_scalars2(
 					      drm_radeon_cmd_buffer_t *cmdbuf )
 {
 	int sz = header.scalars.count;
-	int __user *data = (int __user *)cmdbuf->buf;
 	int start = ((unsigned int)header.scalars.offset) + 0x100;
 	int stride = header.scalars.stride;
 	RING_LOCALS;
@@ -2541,7 +2512,7 @@ static __inline__ int radeon_emit_scalars2(
 	OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
 	OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
 	OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
-	OUT_RING_USER_TABLE( data, sz );
+	OUT_RING_TABLE( cmdbuf->buf, sz );
 	ADVANCE_RING();
 	cmdbuf->buf += sz * sizeof(int);
 	cmdbuf->bufsz -= sz * sizeof(int);
@@ -2554,7 +2525,6 @@ static __inline__ int radeon_emit_vectors(
 					    drm_radeon_cmd_buffer_t *cmdbuf )
 {
 	int sz = header.vectors.count;
-	int __user *data = (int __user *)cmdbuf->buf;
 	int start = header.vectors.offset;
 	int stride = header.vectors.stride;
 	RING_LOCALS;
@@ -2563,7 +2533,7 @@ static __inline__ int radeon_emit_vectors(
 	OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
 	OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
 	OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
-	OUT_RING_USER_TABLE( data, sz );
+	OUT_RING_TABLE( cmdbuf->buf, sz );
 	ADVANCE_RING();
 	cmdbuf->buf += sz * sizeof(int);
@@ -2578,7 +2548,6 @@ static int radeon_emit_packet3( drm_device_t *dev,
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	unsigned int cmdsz;
-	int __user *cmd = (int __user *)cmdbuf->buf;
 	int ret;
 	RING_LOCALS;
@@ -2591,7 +2560,7 @@ static int radeon_emit_packet3( drm_device_t *dev,
 	}
 	BEGIN_RING( cmdsz );
-	OUT_RING_USER_TABLE( cmd, cmdsz );
+	OUT_RING_TABLE( cmdbuf->buf, cmdsz );
 	ADVANCE_RING();
 	cmdbuf->buf += cmdsz * 4;
@@ -2608,7 +2577,6 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_clip_rect_t box;
 	unsigned int cmdsz;
-	int __user *cmd = (int __user *)cmdbuf->buf;
 	int ret;
 	drm_clip_rect_t __user *boxes = cmdbuf->boxes;
 	int i = 0;
@@ -2627,7 +2595,7 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
 	do {
 		if ( i < cmdbuf->nbox ) {
-			if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
+			if (DRM_COPY_FROM_USER( &box, &boxes[i], sizeof(box) ))
 				return DRM_ERR(EFAULT);
 			/* FIXME The second and subsequent times round
 			 * this loop, send a WAIT_UNTIL_3D_IDLE before
@@ -2650,7 +2618,7 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
 		}
 		BEGIN_RING( cmdsz );
-		OUT_RING_USER_TABLE( cmd, cmdsz );
+		OUT_RING_TABLE( cmdbuf->buf, cmdsz );
 		ADVANCE_RING();
 	} while ( ++i < cmdbuf->nbox );
@@ -2703,7 +2671,8 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 	int idx;
 	drm_radeon_cmd_buffer_t cmdbuf;
 	drm_radeon_cmd_header_t header;
-	int orig_nbox;
+	int orig_nbox, orig_bufsz;
+	char *kbuf=NULL;
 	LOCK_TEST_WITH_RETURN( dev, filp );
@@ -2720,24 +2689,29 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 	RING_SPACE_TEST_WITH_RETURN( dev_priv );
 	VB_AGE_TEST_WITH_RETURN( dev_priv );
+	if (cmdbuf.bufsz > 64*1024 || cmdbuf.bufsz<0) {
+		return DRM_ERR(EINVAL);
+	}
-	if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
-		return DRM_ERR(EFAULT);
-	if (cmdbuf.nbox &&
-	    DRM_VERIFYAREA_READ(cmdbuf.boxes,
-			cmdbuf.nbox * sizeof(drm_clip_rect_t)))
+	/* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
+	 * races between checking values and using those values in other code,
+	 * and simply to avoid a lot of function calls to copy in data.
+	 */
+	orig_bufsz = cmdbuf.bufsz;
+	if (orig_bufsz != 0) {
+		kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
+		if (kbuf == NULL)
+			return DRM_ERR(ENOMEM);
+		if (DRM_COPY_FROM_USER(kbuf, cmdbuf.buf, cmdbuf.bufsz))
+			return DRM_ERR(EFAULT);
+		cmdbuf.buf = kbuf;
+	}
 	orig_nbox = cmdbuf.nbox;
 	while ( cmdbuf.bufsz >= sizeof(header) ) {
-		if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
-			DRM_ERROR("__get_user %p\n", cmdbuf.buf);
-			return DRM_ERR(EFAULT);
-		}
+		header.i = *(int *)cmdbuf.buf;
 		cmdbuf.buf += sizeof(header);
 		cmdbuf.bufsz -= sizeof(header);
@@ -2746,7 +2720,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_PACKET\n");
 			if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
 				DRM_ERROR("radeon_emit_packets failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
@@ -2754,7 +2728,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_SCALARS\n");
 			if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
 				DRM_ERROR("radeon_emit_scalars failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
@@ -2762,7 +2736,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_VECTORS\n");
 			if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
 				DRM_ERROR("radeon_emit_vectors failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
@@ -2772,14 +2746,14 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			if ( idx < 0 || idx >= dma->buf_count ) {
 				DRM_ERROR( "buffer index %d (of %d max)\n",
 					   idx, dma->buf_count - 1 );
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			buf = dma->buflist[idx];
 			if ( buf->filp != filp || buf->pending ) {
 				DRM_ERROR( "bad buffer %p %p %d\n",
 					   buf->filp, filp, buf->pending);
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			radeon_cp_discard_buffer( dev, buf );
@@ -2789,7 +2763,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_PACKET3\n");
 			if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
 				DRM_ERROR("radeon_emit_packet3 failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
@@ -2797,7 +2771,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
 			if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
 				DRM_ERROR("radeon_emit_packet3_clip failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
@@ -2805,7 +2779,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
 			if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
 				DRM_ERROR("radeon_emit_scalars2 failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
@@ -2813,21 +2787,28 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			DRM_DEBUG("RADEON_CMD_WAIT\n");
 			if (radeon_emit_wait( dev, header.wait.flags )) {
 				DRM_ERROR("radeon_emit_wait failed\n");
-				return DRM_ERR(EINVAL);
+				goto err;
 			}
 			break;
 		default:
 			DRM_ERROR("bad cmd_type %d at %p\n",
 				  header.header.cmd_type,
 				  cmdbuf.buf - sizeof(header));
-			return DRM_ERR(EINVAL);
+			goto err;
 		}
 	}
+	if (orig_bufsz != 0)
+		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
 	DRM_DEBUG("DONE\n");
 	COMMIT_RING();
 	return 0;
+err:
+	if (orig_bufsz != 0)
+		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+	return DRM_ERR(EINVAL);
 }