Commit d8dbf44f authored by Eric Anholt

drm/vc4: Make the CRTCs cooperate on allocating display lists.

So far, we've only ever lit up one CRTC, so this has been fine.  To
extend to more displays or more planes, we need to make sure we don't
run our display lists into each other.
Signed-off-by: Eric Anholt <eric@anholt.net>
parent 6674a904
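
In practice, the HVS display list SRAM becomes a shared resource: a single drm_mm allocator (protected by a spinlock) hands out per-CRTC regions, which each CRTC reserves in atomic_check and releases when its state is destroyed. Below is a minimal sketch of that pattern, mirroring the calls in the diff that follows; the variable names are illustrative and the drm_mm_insert_node() signature shown is the one this kernel version uses.

	/* Sketch of the shared display-list allocator pattern (not a
	 * drop-in implementation; mirrors the calls in the diff below).
	 */
	struct drm_mm dlist_mm;    /* manages dword offsets in dlist SRAM */
	spinlock_t mm_lock;        /* protects dlist_mm across CRTCs */
	struct drm_mm_node node;   /* one per CRTC state (vc4_crtc_state.mm) */
	u32 dlist_count;           /* dwords needed by this CRTC's planes */
	unsigned long flags;
	int ret;

	/* Bind time: manage everything after the bootloader's display list. */
	spin_lock_init(&mm_lock);
	drm_mm_init(&dlist_mm, HVS_BOOTLOADER_DLIST_END,
		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);

	/* atomic_check: reserve dlist_count dwords for this CRTC's list. */
	spin_lock_irqsave(&mm_lock, flags);
	ret = drm_mm_insert_node(&dlist_mm, &node, dlist_count, 1, 0);
	spin_unlock_irqrestore(&mm_lock, flags);

	/* State destroy: return the region so other CRTCs can reuse it. */
	spin_lock_irqsave(&mm_lock, flags);
	drm_mm_remove_node(&node);
	spin_unlock_irqrestore(&mm_lock, flags);
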
drivers/gpu/drm/vc4/vc4_crtc.c
@@ -49,22 +49,27 @@ struct vc4_crtc {
 	/* Which HVS channel we're using for our CRTC. */
 	int channel;
 
-	/* Pointer to the actual hardware display list memory for the
-	 * crtc.
-	 */
-	u32 __iomem *dlist;
-
-	u32 dlist_size; /* in dwords */
-
 	struct drm_pending_vblank_event *event;
 };
 
+struct vc4_crtc_state {
+	struct drm_crtc_state base;
+	/* Dlist area for this CRTC configuration. */
+	struct drm_mm_node mm;
+};
+
 static inline struct vc4_crtc *
 to_vc4_crtc(struct drm_crtc *crtc)
 {
 	return (struct vc4_crtc *)crtc;
 }
 
+static inline struct vc4_crtc_state *
+to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
+{
+	return (struct vc4_crtc_state *)crtc_state;
+}
+
 struct vc4_crtc_data {
 	/* Which channel of the HVS this pixelvalve sources from. */
 	int hvs_channel;
@@ -319,11 +324,13 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 				 struct drm_crtc_state *state)
 {
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_plane *plane;
-	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	unsigned long flags;
 	u32 dlist_count = 0;
+	int ret;
 
 	/* The pixelvalve can only feed one encoder (and encoders are
 	 * 1:1 with connectors.)
@@ -346,18 +353,12 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 
 	dlist_count++; /* Account for SCALER_CTL0_END. */
 
-	if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) {
-		vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist +
-				   HVS_BOOTLOADER_DLIST_END);
-		vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) -
-					HVS_BOOTLOADER_DLIST_END);
-
-		if (dlist_count > vc4_crtc->dlist_size) {
-			DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n",
-				      dlist_count, vc4_crtc->dlist_size);
-			return -EINVAL;
-		}
-	}
+	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
+	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
+				 dlist_count, 1, 0);
+	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -368,47 +369,29 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
 	struct drm_plane *plane;
 	bool debug_dump_regs = false;
-	u32 __iomem *dlist_next = vc4_crtc->dlist;
+	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
+	u32 __iomem *dlist_next = dlist_start;
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
 		vc4_hvs_dump_state(dev);
 	}
 
-	/* Copy all the active planes' dlist contents to the hardware dlist.
-	 *
-	 * XXX: If the new display list was large enough that it
-	 * overlapped a currently-read display list, we need to do
-	 * something like disable scanout before putting in the new
-	 * list.  For now, we're safe because we only have the two
-	 * planes.
-	 */
+	/* Copy all the active planes' dlist contents to the hardware dlist. */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		dlist_next += vc4_plane_write_dlist(plane, dlist_next);
 	}
 
-	if (dlist_next == vc4_crtc->dlist) {
-		/* If no planes were enabled, use the SCALER_CTL0_END
-		 * at the start of the display list memory (in the
-		 * bootloader section).  We'll rewrite that
-		 * SCALER_CTL0_END, just in case, though.
-		 */
-		writel(SCALER_CTL0_END, vc4->hvs->dlist);
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0);
-	} else {
-		writel(SCALER_CTL0_END, dlist_next);
-		dlist_next++;
-
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  (u32 __iomem *)vc4_crtc->dlist -
-			  (u32 __iomem *)vc4->hvs->dlist);
-
-		/* Make the next display list start after ours. */
-		vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
-		vc4_crtc->dlist = dlist_next;
-	}
+	writel(SCALER_CTL0_END, dlist_next);
+	dlist_next++;
+
+	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
+
+	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+		  vc4_state->mm.start);
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
@@ -573,6 +556,36 @@ static int vc4_page_flip(struct drm_crtc *crtc,
 	return drm_atomic_helper_page_flip(crtc, fb, event, flags);
 }
 
+static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct vc4_crtc_state *vc4_state;
+
+	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
+	if (!vc4_state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
+	return &vc4_state->base;
+}
+
+static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
+				   struct drm_crtc_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+	if (vc4_state->mm.allocated) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
+		drm_mm_remove_node(&vc4_state->mm);
+		spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
+	}
+
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
+
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = vc4_crtc_destroy,
@@ -581,8 +594,8 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
 	.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
 	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.atomic_duplicate_state = vc4_crtc_duplicate_state,
+	.atomic_destroy_state = vc4_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
drivers/gpu/drm/vc4/vc4_drv.h
@@ -149,7 +149,13 @@ struct vc4_v3d {
 struct vc4_hvs {
 	struct platform_device *pdev;
 	void __iomem *regs;
-	void __iomem *dlist;
+	u32 __iomem *dlist;
+
+	/* Memory manager for CRTCs to allocate space in the display
+	 * list.  Units are dwords.
+	 */
+	struct drm_mm dlist_mm;
+	spinlock_t mm_lock;
 };
 
 struct vc4_plane {
drivers/gpu/drm/vc4/vc4_hvs.c
@@ -119,6 +119,17 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 
 	hvs->dlist = hvs->regs + SCALER_DLIST_START;
 
+	spin_lock_init(&hvs->mm_lock);
+
+	/* Set up the HVS display list memory manager.  We never
+	 * overwrite the setup from the bootloader (just 128b out of
+	 * our 16K), since we don't want to scramble the screen when
+	 * transitioning from the firmware's boot setup to runtime.
+	 */
+	drm_mm_init(&hvs->dlist_mm,
+		    HVS_BOOTLOADER_DLIST_END,
+		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
+
 	vc4->hvs = hvs;
 	return 0;
 }
@@ -129,6 +140,8 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dev *vc4 = drm->dev_private;
 
+	drm_mm_takedown(&vc4->hvs->dlist_mm);
+
 	vc4->hvs = NULL;
 }