Commit 8187a2b7 authored by Zou Nan hai's avatar Zou Nan hai Committed by Eric Anholt

drm/i915: introduce intel_ring_buffer structure (V2)

Introduces a more complete intel_ring_buffer structure with callbacks
for setup and management of a particular ringbuffer, and converts the
render ring buffer consumers to use it.
Signed-off-by: default avatarZou Nan hai <nanhai.zou@intel.com>
Signed-off-by: default avatarXiang Hai hao <haihao.xiang@intel.com>
[anholt: Fixed up whitespace fail and rebased against prep patches]
Signed-off-by: default avatarEric Anholt <eric@anholt.net>
parent d3301d86
...@@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) ...@@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
u8 *virt; u8 *virt;
uint32_t *ptr, off; uint32_t *ptr, off;
if (!dev_priv->render_ring.ring_obj) { if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n"); seq_printf(m, "No ringbuffer setup\n");
return 0; return 0;
} }
virt = dev_priv->render_ring.virtual_start; virt = dev_priv->render_ring.virtual_start;
for (off = 0; off < dev_priv->render_ring.Size; off += 4) { for (off = 0; off < dev_priv->render_ring.size; off += 4) {
ptr = (uint32_t *)(virt + off); ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr); seq_printf(m, "%08x : %08x\n", off, *ptr);
} }
...@@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) ...@@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head); seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail); seq_printf(m, "RingTail : %08x\n", tail);
seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.Size); seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
return 0; return 0;
......
...@@ -40,7 +40,6 @@ ...@@ -40,7 +40,6 @@
#include <linux/vga_switcheroo.h> #include <linux/vga_switcheroo.h>
#include <linux/slab.h> #include <linux/slab.h>
/** /**
* Sets up the hardware status page for devices that need a physical address * Sets up the hardware status page for devices that need a physical address
* in the register. * in the register.
...@@ -56,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) ...@@ -56,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n"); DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM; return -ENOMEM;
} }
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; dev_priv->render_ring.status_page.page_addr
= dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE); memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
if (IS_I965G(dev)) if (IS_I965G(dev))
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
...@@ -95,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) ...@@ -95,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv; struct drm_i915_master_private *master_priv;
drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring); struct intel_ring_buffer *ring = &dev_priv->render_ring;
/* /*
* We should never lose context on the ring with modesetting * We should never lose context on the ring with modesetting
...@@ -108,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) ...@@ -108,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8); ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0) if (ring->space < 0)
ring->space += ring->Size; ring->space += ring->size;
if (!dev->primary->master) if (!dev->primary->master)
return; return;
...@@ -128,12 +128,7 @@ static int i915_dma_cleanup(struct drm_device * dev) ...@@ -128,12 +128,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled) if (dev->irq_enabled)
drm_irq_uninstall(dev); drm_irq_uninstall(dev);
if (dev_priv->render_ring.virtual_start) { intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
drm_core_ioremapfree(&dev_priv->render_ring.map, dev);
dev_priv->render_ring.virtual_start = NULL;
dev_priv->render_ring.map.handle = NULL;
dev_priv->render_ring.map.size = 0;
}
/* Clear the HWS virtual address at teardown */ /* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev)) if (I915_NEED_GFX_HWS(dev))
...@@ -156,14 +151,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) ...@@ -156,14 +151,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
} }
if (init->ring_size != 0) { if (init->ring_size != 0) {
if (dev_priv->render_ring.ring_obj != NULL) { if (dev_priv->render_ring.gem_object != NULL) {
i915_dma_cleanup(dev); i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in " DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n"); "GEM mode\n");
return -EINVAL; return -EINVAL;
} }
dev_priv->render_ring.Size = init->ring_size; dev_priv->render_ring.size = init->ring_size;
dev_priv->render_ring.map.offset = init->ring_start; dev_priv->render_ring.map.offset = init->ring_start;
dev_priv->render_ring.map.size = init->ring_size; dev_priv->render_ring.map.size = init->ring_size;
...@@ -201,26 +196,29 @@ static int i915_dma_resume(struct drm_device * dev) ...@@ -201,26 +196,29 @@ static int i915_dma_resume(struct drm_device * dev)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__); DRM_DEBUG_DRIVER("%s\n", __func__);
if (dev_priv->render_ring.map.handle == NULL) { ring = &dev_priv->render_ring;
if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for" DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n"); " ring buffer\n");
return -ENOMEM; return -ENOMEM;
} }
/* Program Hardware Status Page */ /* Program Hardware Status Page */
if (!dev_priv->hw_status_page) { if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n"); DRM_ERROR("Can not find hardware status page\n");
return -EINVAL; return -EINVAL;
} }
DRM_DEBUG_DRIVER("hw status page @ %p\n", DRM_DEBUG_DRIVER("hw status page @ %p\n",
dev_priv->hw_status_page); ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
if (dev_priv->status_gfx_addr != 0) ring->setup_status_page(dev, ring);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
else else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page); I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n"); DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0; return 0;
...@@ -330,9 +328,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) ...@@ -330,9 +328,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
int i; int i;
RING_LOCALS;
if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.Size - 8) if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
return -EINVAL; return -EINVAL;
BEGIN_LP_RING((dwords+1)&~1); BEGIN_LP_RING((dwords+1)&~1);
...@@ -365,9 +362,7 @@ i915_emit_box(struct drm_device *dev, ...@@ -365,9 +362,7 @@ i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes, struct drm_clip_rect *boxes,
int i, int DR1, int DR4) int i, int DR1, int DR4)
{ {
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box = boxes[i]; struct drm_clip_rect box = boxes[i];
RING_LOCALS;
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n", DRM_ERROR("Bad box %d,%d..%d,%d\n",
...@@ -404,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev) ...@@ -404,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
RING_LOCALS;
dev_priv->counter++; dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL) if (dev_priv->counter > 0x7FFFFFFFUL)
...@@ -458,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, ...@@ -458,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch, drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects) struct drm_clip_rect *cliprects)
{ {
drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects; int nbox = batch->num_cliprects;
int i = 0, count; int i = 0, count;
RING_LOCALS;
if ((batch->start | batch->used) & 0x7) { if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment"); DRM_ERROR("alignment");
...@@ -510,7 +502,6 @@ static int i915_dispatch_flip(struct drm_device * dev) ...@@ -510,7 +502,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv; dev->primary->master->driver_priv;
RING_LOCALS;
if (!master_priv->sarea_priv) if (!master_priv->sarea_priv)
return -EINVAL; return -EINVAL;
...@@ -563,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev) ...@@ -563,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
i915_kernel_lost_context(dev); i915_kernel_lost_context(dev);
return i915_wait_ring(dev, dev_priv->render_ring.Size - 8, __func__); return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
dev_priv->render_ring.size - 8);
} }
static int i915_flush_ioctl(struct drm_device *dev, void *data, static int i915_flush_ioctl(struct drm_device *dev, void *data,
...@@ -805,6 +797,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, ...@@ -805,6 +797,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data; drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = &dev_priv->render_ring;
if (!I915_NEED_GFX_HWS(dev)) if (!I915_NEED_GFX_HWS(dev))
return -EINVAL; return -EINVAL;
...@@ -821,7 +814,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, ...@@ -821,7 +814,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr; dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024; dev_priv->hws_map.size = 4*1024;
...@@ -837,10 +830,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data, ...@@ -837,10 +830,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n"); " G33 hw status page\n");
return -ENOMEM; return -ENOMEM;
} }
dev_priv->hw_status_page = dev_priv->hws_map.handle; ring->status_page.page_addr = dev_priv->hws_map.handle;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
dev_priv->status_gfx_addr); dev_priv->status_gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n", DRM_DEBUG_DRIVER("load hws at %p\n",
...@@ -1639,7 +1632,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1639,7 +1632,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->user_irq_lock); spin_lock_init(&dev_priv->user_irq_lock);
spin_lock_init(&dev_priv->error_lock); spin_lock_init(&dev_priv->error_lock);
dev_priv->user_irq_refcount = 0;
dev_priv->trace_irq_seqno = 0; dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE); ret = drm_vblank_init(dev, I915_NUM_PIPE);
......
...@@ -388,33 +388,10 @@ int i965_reset(struct drm_device *dev, u8 flags) ...@@ -388,33 +388,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
* switched away). * switched away).
*/ */
if (drm_core_check_feature(dev, DRIVER_MODESET) || if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) { !dev_priv->mm.suspended) {
drm_i915_ring_buffer_t *ring = &dev_priv->render_ring; struct intel_ring_buffer *ring = &dev_priv->render_ring;
struct drm_gem_object *obj = ring->ring_obj;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
dev_priv->mm.suspended = 0; dev_priv->mm.suspended = 0;
ring->init(dev, ring);
/* Stop the ring if it's running. */
I915_WRITE(PRB0_CTL, 0);
I915_WRITE(PRB0_TAIL, 0);
I915_WRITE(PRB0_HEAD, 0);
/* Initialize the ring. */
I915_WRITE(PRB0_START, obj_priv->gtt_offset);
I915_WRITE(PRB0_CTL,
((obj->size - 4096) & RING_NR_PAGES) |
RING_NO_REPORT |
RING_VALID);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
}
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev); drm_irq_uninstall(dev);
drm_irq_install(dev); drm_irq_install(dev);
......
...@@ -31,8 +31,8 @@ ...@@ -31,8 +31,8 @@
#define _I915_DRV_H_ #define _I915_DRV_H_
#include "i915_reg.h" #include "i915_reg.h"
#include "i915_drm.h"
#include "intel_bios.h" #include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h> #include <linux/io-mapping.h>
/* General customization: /* General customization:
...@@ -92,16 +92,6 @@ struct drm_i915_gem_phys_object { ...@@ -92,16 +92,6 @@ struct drm_i915_gem_phys_object {
struct drm_gem_object *cur_obj; struct drm_gem_object *cur_obj;
}; };
typedef struct _drm_i915_ring_buffer {
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;
struct mem_block { struct mem_block {
struct mem_block *next; struct mem_block *next;
struct mem_block *prev; struct mem_block *prev;
...@@ -244,7 +234,7 @@ typedef struct drm_i915_private { ...@@ -244,7 +234,7 @@ typedef struct drm_i915_private {
void __iomem *regs; void __iomem *regs;
struct pci_dev *bridge_dev; struct pci_dev *bridge_dev;
drm_i915_ring_buffer_t render_ring; struct intel_ring_buffer render_ring;
drm_dma_handle_t *status_page_dmah; drm_dma_handle_t *status_page_dmah;
void *hw_status_page; void *hw_status_page;
...@@ -270,8 +260,6 @@ typedef struct drm_i915_private { ...@@ -270,8 +260,6 @@ typedef struct drm_i915_private {
atomic_t irq_received; atomic_t irq_received;
/** Protects user_irq_refcount and irq_mask_reg */ /** Protects user_irq_refcount and irq_mask_reg */
spinlock_t user_irq_lock; spinlock_t user_irq_lock;
/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
int user_irq_refcount;
u32 trace_irq_seqno; u32 trace_irq_seqno;
/** Cached value of IMR to avoid reads in updating the bitfield */ /** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg; u32 irq_mask_reg;
...@@ -832,9 +820,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, ...@@ -832,9 +820,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data, extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
void i915_trace_irq_get(struct drm_device *dev, u32 seqno); void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev); extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
...@@ -853,8 +839,10 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data, ...@@ -853,8 +839,10 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask); extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask); u32 mask);
extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
u32 mask);
void void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
...@@ -962,8 +950,6 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); ...@@ -962,8 +950,6 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void); void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void); void i915_gem_shrinker_exit(void);
int i915_gem_init_pipe_control(struct drm_device *dev);
void i915_gem_cleanup_pipe_control(struct drm_device *dev);
/* i915_gem_tiling.c */ /* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
...@@ -1014,16 +1000,6 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; ...@@ -1014,16 +1000,6 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return;
static inline void opregion_enable_asle(struct drm_device *dev) { return; } static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif #endif
/* intel_ringbuffer.c */
extern void i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains);
extern int i915_dispatch_gem_execbuffer(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset);
extern uint32_t i915_ring_add_request(struct drm_device *dev);
/* modesetting */ /* modesetting */
extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev);
...@@ -1044,7 +1020,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); ...@@ -1044,7 +1020,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
* has access to the ring. * has access to the ring.
*/ */
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
if (((drm_i915_private_t *)dev->dev_private)->render_ring.ring_obj == NULL) \ if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
== NULL) \
LOCK_TEST_WITH_RETURN(dev, file_priv); \ LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0) } while (0)
...@@ -1060,32 +1037,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); ...@@ -1060,32 +1037,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define I915_VERBOSE 0 #define I915_VERBOSE 0
#define RING_LOCALS volatile unsigned int *ring_virt__; #define BEGIN_LP_RING(n) do { \
drm_i915_private_t *dev_priv = dev->dev_private; \
#define BEGIN_LP_RING(n) do { \ if (I915_VERBOSE) \
int bytes__ = 4*(n); \ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \
/* a wrap must occur between instructions so pad beforehand */ \
if (unlikely (dev_priv->render_ring.tail + bytes__ > dev_priv->render_ring.Size)) \
i915_wrap_ring(dev); \
if (unlikely (dev_priv->render_ring.space < bytes__)) \
i915_wait_ring(dev, bytes__, __func__); \
ring_virt__ = (unsigned int *) \
(dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail); \
dev_priv->render_ring.tail += bytes__; \
dev_priv->render_ring.tail &= dev_priv->render_ring.Size - 1; \
dev_priv->render_ring.space -= bytes__; \
} while (0) } while (0)
#define OUT_RING(n) do { \
if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ #define OUT_RING(x) do { \
*ring_virt__++ = (n); \ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
intel_ring_emit(dev, &dev_priv->render_ring, x); \
} while (0) } while (0)
#define ADVANCE_LP_RING() do { \ #define ADVANCE_LP_RING() do { \
drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \ if (I915_VERBOSE) \
DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->render_ring.tail); \ DRM_DEBUG("ADVANCE_LP_RING %x\n", \
I915_WRITE(PRB0_TAIL, dev_priv->render_ring.tail); \ dev_priv->render_ring.tail); \
intel_ring_advance(dev, &dev_priv->render_ring); \
} while(0) } while(0)
/** /**
...@@ -1103,14 +1075,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); ...@@ -1103,14 +1075,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
* *
* The area from dword 0x20 to 0x3ff is available for driver usage. * The area from dword 0x20 to 0x3ff is available for driver usage.
*/ */
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
(dev_priv->render_ring.status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20 #define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21 #define I915_BREADCRUMB_INDEX 0x21
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_I830(dev) ((dev)->pci_device == 0x3577)
......
...@@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, ...@@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
} }
} }
} }
uint32_t uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv, i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t flush_domains) uint32_t flush_domains)
...@@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, ...@@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (request == NULL) if (request == NULL)
return 0; return 0;
seqno = i915_ring_add_request(dev); seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
file_priv, flush_domains);
DRM_DEBUG_DRIVER("%d\n", seqno); DRM_DEBUG_DRIVER("%d\n", seqno);
...@@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, ...@@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
static uint32_t static uint32_t
i915_retire_commands(struct drm_device *dev) i915_retire_commands(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
uint32_t flush_domains = 0; uint32_t flush_domains = 0;
RING_LOCALS;
/* The sampler always gets flushed on i965 (sigh) */ /* The sampler always gets flushed on i965 (sigh) */
if (IS_I965G(dev)) if (IS_I965G(dev))
...@@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev) ...@@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno; uint32_t seqno;
if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) struct intel_ring_buffer *ring = &(dev_priv->render_ring);
if (!ring->status_page.page_addr
|| list_empty(&dev_priv->mm.request_list))
return; return;
seqno = i915_get_gem_seqno(dev); seqno = i915_get_gem_seqno(dev);
...@@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev) ...@@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev)
if (unlikely (dev_priv->trace_irq_seqno && if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
i915_user_irq_put(dev);
ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0; dev_priv->trace_irq_seqno = 0;
} }
} }
...@@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) ...@@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
u32 ier; u32 ier;
int ret = 0; int ret = 0;
struct intel_ring_buffer *ring = &dev_priv->render_ring;
BUG_ON(seqno == 0); BUG_ON(seqno == 0);
if (atomic_read(&dev_priv->mm.wedged)) if (atomic_read(&dev_priv->mm.wedged))
...@@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) ...@@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
trace_i915_gem_request_wait_begin(dev, seqno); trace_i915_gem_request_wait_begin(dev, seqno);
dev_priv->mm.waiting_gem_seqno = seqno; dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev); ring->user_irq_get(dev, ring);
if (interruptible) if (interruptible)
ret = wait_event_interruptible(dev_priv->irq_queue, ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
...@@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) ...@@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
atomic_read(&dev_priv->mm.wedged)); atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev); ring->user_irq_put(dev, ring);
dev_priv->mm.waiting_gem_seqno = 0; dev_priv->mm.waiting_gem_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno); trace_i915_gem_request_wait_end(dev, seqno);
...@@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) ...@@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
} }
static void
i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
invalidate_domains,
flush_domains);
}
/** /**
* Ensures that all rendering to the object has completed and the object is * Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU. * safe to unbind from the GTT or access from the CPU.
...@@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
#endif #endif
/* Exec the batchbuffer */ /* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
&dev_priv->render_ring,
args,
cliprects,
exec_offset);
if (ret) { if (ret) {
DRM_ERROR("dispatch failed %d\n", ret); DRM_ERROR("dispatch failed %d\n", ret);
goto err; goto err;
...@@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev) ...@@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev)
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) { if (dev_priv->mm.suspended ||
dev_priv->render_ring.gem_object == NULL) {
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return 0; return 0;
} }
...@@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev) ...@@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev)
* 965+ support PIPE_CONTROL commands, which provide finer grained control * 965+ support PIPE_CONTROL commands, which provide finer grained control
* over cache flushing. * over cache flushing.
*/ */
int static int
i915_gem_init_pipe_control(struct drm_device *dev) i915_gem_init_pipe_control(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
...@@ -4459,7 +4481,8 @@ i915_gem_init_pipe_control(struct drm_device *dev) ...@@ -4459,7 +4481,8 @@ i915_gem_init_pipe_control(struct drm_device *dev)
return ret; return ret;
} }
void
static void
i915_gem_cleanup_pipe_control(struct drm_device *dev) i915_gem_cleanup_pipe_control(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
...@@ -4476,6 +4499,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev) ...@@ -4476,6 +4499,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
dev_priv->seqno_page = NULL; dev_priv->seqno_page = NULL;
} }
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
dev_priv->render_ring = render_ring;
if (!I915_NEED_GFX_HWS(dev)) {
dev_priv->render_ring.status_page.page_addr
= dev_priv->status_page_dmah->vaddr;
memset(dev_priv->render_ring.status_page.page_addr,
0, PAGE_SIZE);
}
if (HAS_PIPE_CONTROL(dev)) {
ret = i915_gem_init_pipe_control(dev);
if (ret)
return ret;
}
ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
return ret;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
}
int int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data, i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
......
...@@ -545,7 +545,8 @@ i915_ringbuffer_last_batch(struct drm_device *dev) ...@@ -545,7 +545,8 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
} }
if (bbaddr == 0) { if (bbaddr == 0) {
ring = (u32 *)(dev_priv->render_ring.virtual_start + dev_priv->render_ring.Size); ring = (u32 *)(dev_priv->render_ring.virtual_start
+ dev_priv->render_ring.size);
while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
bbaddr = i915_get_bbaddr(dev, ring); bbaddr = i915_get_bbaddr(dev, ring);
if (bbaddr) if (bbaddr)
...@@ -639,7 +640,8 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -639,7 +640,8 @@ static void i915_capture_error_state(struct drm_device *dev)
error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
/* Record the ringbuffer */ /* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.ring_obj); error->ringbuffer = i915_error_object_create(dev,
dev_priv->render_ring.gem_object);
/* Record buffers on the active list. */ /* Record buffers on the active list. */
error->active_bo = NULL; error->active_bo = NULL;
...@@ -984,7 +986,6 @@ static int i915_emit_irq(struct drm_device * dev) ...@@ -984,7 +986,6 @@ static int i915_emit_irq(struct drm_device * dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
RING_LOCALS;
i915_kernel_lost_context(dev); i915_kernel_lost_context(dev);
...@@ -1009,9 +1010,10 @@ static int i915_emit_irq(struct drm_device * dev) ...@@ -1009,9 +1010,10 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_trace_irq_get(struct drm_device *dev, u32 seqno) void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
if (dev_priv->trace_irq_seqno == 0) if (dev_priv->trace_irq_seqno == 0)
i915_user_irq_get(dev); render_ring->user_irq_get(dev, render_ring);
dev_priv->trace_irq_seqno = seqno; dev_priv->trace_irq_seqno = seqno;
} }
...@@ -1021,6 +1023,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) ...@@ -1021,6 +1023,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0; int ret = 0;
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv)); READ_BREADCRUMB(dev_priv));
...@@ -1034,10 +1037,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) ...@@ -1034,10 +1037,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv) if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
i915_user_irq_get(dev); render_ring->user_irq_get(dev, render_ring);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr); READ_BREADCRUMB(dev_priv) >= irq_nr);
i915_user_irq_put(dev); render_ring->user_irq_put(dev, render_ring);
if (ret == -EBUSY) { if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
......
...@@ -4629,7 +4629,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -4629,7 +4629,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags; unsigned long flags;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc; int ret, pipesrc;
RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL); work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) if (work == NULL)
......
...@@ -211,9 +211,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) ...@@ -211,9 +211,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
static int intel_overlay_on(struct intel_overlay *overlay) static int intel_overlay_on(struct intel_overlay *overlay)
{ {
struct drm_device *dev = overlay->dev; struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret; int ret;
RING_LOCALS;
BUG_ON(overlay->active); BUG_ON(overlay->active);
...@@ -248,7 +246,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay, ...@@ -248,7 +246,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr; u32 flip_addr = overlay->flip_addr;
u32 tmp; u32 tmp;
RING_LOCALS;
BUG_ON(!overlay->active); BUG_ON(!overlay->active);
...@@ -274,7 +271,6 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) ...@@ -274,7 +271,6 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
int ret; int ret;
u32 tmp; u32 tmp;
RING_LOCALS;
if (overlay->last_flip_req != 0) { if (overlay->last_flip_req != 0) {
ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
...@@ -314,9 +310,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) ...@@ -314,9 +310,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{ {
u32 flip_addr = overlay->flip_addr; u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev; struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret; int ret;
RING_LOCALS;
BUG_ON(!overlay->active); BUG_ON(!overlay->active);
...@@ -390,11 +384,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, ...@@ -390,11 +384,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
int interruptible) int interruptible)
{ {
struct drm_device *dev = overlay->dev; struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj; struct drm_gem_object *obj;
u32 flip_addr; u32 flip_addr;
int ret; int ret;
RING_LOCALS;
if (overlay->hw_wedged == HW_WEDGED) if (overlay->hw_wedged == HW_WEDGED)
return -EIO; return -EIO;
......
This diff is collapsed.
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
/*
 * Hardware status page: a page the GPU writes status (e.g. sequence
 * numbers) into and the CPU reads via intel_read_status_page().
 */
struct intel_hw_status_page {
	void *page_addr;		/* CPU mapping of the page */
	unsigned int gfx_addr;		/* GPU (graphics) address of the page */
	struct drm_gem_object *obj;	/* backing GEM object, if any */
};
struct drm_i915_gem_execbuffer2;
/*
 * Per-ring state plus a set of ring-specific callbacks, so common code
 * can drive different ring types (render, BSD) through one interface.
 */
struct intel_ring_buffer {
	const char *name;
	/* MMIO register offsets for this ring's control registers. */
	struct ring_regs {
		u32 ctl;
		u32 head;
		u32 tail;
		u32 start;
	} regs;
	unsigned int ring_flag;
	unsigned long size;		/* ring buffer size in bytes */
	unsigned int alignment;
	void *virtual_start;		/* CPU mapping of the ring */
	struct drm_device *dev;
	struct drm_gem_object *gem_object; /* GEM object backing the ring */

	/* Software-tracked ring positions (byte offsets into the ring). */
	unsigned int head;
	unsigned int tail;
	unsigned int space;		/* free bytes between tail and head */
	u32 next_seqno;
	struct intel_hw_status_page status_page;
	u32 irq_gem_seqno;		/* last seq seem at irq time */
	u32 waiting_gem_seqno;
	int user_irq_refcount;		/* nesting count for user IRQ get/put */

	/* Ring-specific operations; filled in by each ring's definition. */
	void (*user_irq_get)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*setup_status_page)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	int (*init)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	unsigned int (*get_head)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	unsigned int (*get_tail)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	unsigned int (*get_active_head)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*advance_ring)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*flush)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			u32	invalidate_domains,
			u32	flush_domains);
	u32 (*add_request)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_file *file_priv,
			u32 flush_domains);
	u32 (*get_gem_seqno)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_i915_gem_execbuffer2 *exec,
			struct drm_clip_rect *cliprects,
			uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	wait_queue_head_t irq_queue;	/* waiters for this ring's user IRQ */
	drm_local_map_t map;
};
/* Read one dword from the ring's hardware status page at index @reg. */
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		int reg)
{
	u32 *hw_page = ring->status_page.page_addr;

	return hw_page[reg];
}
/* Ring buffer lifecycle: allocation/mapping and teardown. */
int intel_init_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring);

/* Wait for n bytes of free space; wrap the tail back to the start. */
int intel_wait_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n);
int intel_wrap_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring);

/* Command emission: begin/emit/fill, then advance to kick the ring. */
void intel_ring_begin(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n);
void intel_ring_emit(struct drm_device *dev,
		struct intel_ring_buffer *ring, u32 data);
void intel_fill_struct(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		void *data,
		unsigned int len);
void intel_ring_advance(struct drm_device *dev,
		struct intel_ring_buffer *ring);

u32 intel_ring_get_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring);

/* The two ring definitions provided by intel_ringbuffer.c. */
extern struct intel_ring_buffer render_ring;
extern struct intel_ring_buffer bsd_ring;
#endif /* _INTEL_RINGBUFFER_H_ */
...@@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 { ...@@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 {
__u32 num_cliprects; __u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */ /** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr; __u64 cliprects_ptr;
__u64 flags; /* currently unused */ #define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (1<<1)
__u64 flags;
__u64 rsvd1; __u64 rsvd1;
__u64 rsvd2; __u64 rsvd2;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment