Commit 26398db1 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2020-03-05' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Fixes for v5.6-rc5:
- dma-buf: fix a memory leak in dma_buf_release().
- virtio: fix a resource id creation race.
- Various mmap fixes.
- ttm: fix a fence leak in ttm_buffer_object_transfer().
- sun4i: fixes for VI layer format support.
- kirin: Revert "Fix for hikey620 display offset problem"
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/56de63c7-0cdf-5805-e268-44944af7fef2@linux.intel.com
parents 70b8ea1a 1b79cfd9
......@@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
return 0;
}
......
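The dma-buf hunk above closes the gap between dma_buf_set_name(), which duplicates the userspace-supplied name, and dma_buf_release(), which previously freed only the dma_buf itself. A minimal sketch of the pairing, using a trimmed-down struct and hypothetical buf_set_name()/buf_release() helpers rather than the real dma-buf code:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct buf {
        char *name;     /* optional, set after creation */
    };

    static int buf_set_name(struct buf *b, const char *src)
    {
        char *name = kstrdup(src, GFP_KERNEL);

        if (!name)
            return -ENOMEM;
        kfree(b->name);         /* drop any previously set name */
        b->name = name;
        return 0;
    }

    static void buf_release(struct buf *b)
    {
        kfree(b->name);         /* the counterpart the fix adds */
        kfree(b);
    }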
......@@ -210,8 +210,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
dpcd[0] = dp_bw;
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
......
......@@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (ret)
goto err_zero_use;
if (obj->import_attach)
if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
else
} else {
pgprot_t prot = PAGE_KERNEL;
if (!shmem->map_cached)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
VM_MAP, prot);
}
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
......@@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (!shmem->map_cached)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
......
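For the shmem helper hunks above, the net effect is that both vmap() and mmap() now derive their page protection from a per-object map_cached flag instead of unconditionally applying write-combine. A minimal sketch of that selection, assuming a stripped-down object with only the fields used here (the real structure is struct drm_gem_shmem_object):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    struct shmem_obj {
        struct page **pages;
        size_t size;
        bool map_cached;    /* driver opts in to cached CPU mappings */
    };

    static void *shmem_vmap_sketch(struct shmem_obj *obj)
    {
        pgprot_t prot = PAGE_KERNEL;

        /* Only fall back to write-combine when cached maps were not requested. */
        if (!obj->map_cached)
            prot = pgprot_writecombine(prot);

        return vmap(obj->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot);
    }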
......@@ -83,7 +83,6 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)
......
......@@ -46,7 +46,6 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
struct work_struct display_reset_wq;
bool power_on;
int irq;
......@@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
......@@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
static void drm_underflow_wq(struct work_struct *work)
{
struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
display_reset_wq);
struct drm_device *drm_dev = ctx->crtc->dev;
struct drm_atomic_state *state;
state = drm_atomic_helper_suspend(drm_dev);
drm_atomic_helper_resume(drm_dev, state);
}
static irqreturn_t ade_irq_handler(int irq, void *data)
{
struct ade_hw_ctx *ctx = data;
......@@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
MASK(1), 1);
DRM_ERROR("LDI underflow!");
schedule_work(&ctx->display_reset_wq);
}
return IRQ_HANDLED;
}
......@@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
if (ret)
return ERR_PTR(-EIO);
INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
ctx->crtc = crtc;
return ctx;
......
......@@ -601,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
source_id = (fault_status >> 16);
/* Page fault only */
if ((status & mask) == BIT(i)) {
WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
ret = -1;
if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
if (!ret) {
mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
status &= ~mask;
continue;
}
}
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
i, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(pfdev, exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
if (ret)
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
i, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(pfdev, exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
mmu_write(pfdev, MMU_INT_CLEAR, mask);
......
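The panfrost rework above only routes a fault to panfrost_mmu_map_fault_addr() when the exception type sits in the 0xC0-0xC7 translation-fault block, and prints the terminal-fault report otherwise; the old code warned and attempted the mapping unconditionally. The range test relies on masking the low three bits; a small self-contained sketch (the helper name is invented for illustration):

    #include <linux/types.h>

    /*
     * (x & 0xF8) == 0xC0 accepts exactly 0xC0..0xC7 in a single comparison,
     * which is the block of exception codes the new condition treats as
     * handleable page/translation faults.
     */
    static bool is_translation_fault(u8 exception_type)
    {
        return (exception_type & 0xF8) == 0xC0;
    }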
......@@ -106,48 +106,128 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
......@@ -196,12 +276,6 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
.rgb = true,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
......@@ -220,12 +294,6 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
.rgb = true,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
......@@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_P010,
.de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_P210,
.de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
};
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
......
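The sun8i_mixer.c hunk above extends de2_formats[] with the alpha-less 16-bit variants, the 10-bit RGB formats and P010/P210, and drops the YUV444/YVU444 entries that mapped those formats onto an RGB hardware format. The table is consumed by sun8i_mixer_format_info(), whose body is cut off here; a hedged sketch of that kind of linear lookup, with a simplified entry type and made-up hardware codes standing in for the real ones:

    #include <linux/kernel.h>
    #include <drm/drm_fourcc.h>

    struct fmt_info {
        u32 drm_fmt;    /* DRM_FORMAT_* fourcc */
        u32 hw_fmt;     /* mixer register value (illustrative numbers) */
        bool rgb;
    };

    static const struct fmt_info formats[] = {
        { .drm_fmt = DRM_FORMAT_ARGB8888, .hw_fmt = 0, .rgb = true },
        { .drm_fmt = DRM_FORMAT_NV12,     .hw_fmt = 8, .rgb = false },
    };

    static const struct fmt_info *fmt_info_lookup(u32 drm_fmt)
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); i++)
            if (formats[i].drm_fmt == drm_fmt)
                return &formats[i];

        return NULL;    /* format not handled by this mixer */
    }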
......@@ -93,6 +93,10 @@
#define SUN8I_MIXER_FBFMT_ABGR1555 17
#define SUN8I_MIXER_FBFMT_RGBA5551 18
#define SUN8I_MIXER_FBFMT_BGRA5551 19
#define SUN8I_MIXER_FBFMT_ARGB2101010 20
#define SUN8I_MIXER_FBFMT_ABGR2101010 21
#define SUN8I_MIXER_FBFMT_RGBA1010102 22
#define SUN8I_MIXER_FBFMT_BGRA1010102 23
#define SUN8I_MIXER_FBFMT_YUYV 0
#define SUN8I_MIXER_FBFMT_UYVY 1
......@@ -109,6 +113,13 @@
/* format 12 is semi-planar YUV411 UVUV */
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
/* format 15 doesn't exist */
/* format 16 is P010 YVU */
#define SUN8I_MIXER_FBFMT_P010_YUV 17
/* format 18 is P210 YVU */
#define SUN8I_MIXER_FBFMT_P210_YUV 19
/* format 20 is packed YVU444 10-bit */
/* format 21 is packed YUV444 10-bit */
/*
* Sub-engines listed below are unused for now. The EN registers are here only
......
......@@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
};
/*
* While all RGB formats are supported, VI planes don't support
* alpha blending, so there is no point having formats with alpha
* channel if their opaque analog exist.
* While DE2 VI layer supports same RGB formats as UI layer, alpha
* channel is ignored. This structure lists all unique variants
* where alpha channel is replaced with "don't care" (X) channel.
*/
static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_XBGR4444,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_NV16,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
};
static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_ABGR1555,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRA1010102,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
......@@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_P010,
DRM_FORMAT_P210,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
......@@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
};
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
......@@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
u32 supported_encodings, supported_ranges;
unsigned int plane_cnt, format_count;
struct sun8i_vi_layer *layer;
unsigned int plane_cnt;
const u32 *formats;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
if (mixer->cfg->is_de3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
formats = sun8i_vi_layer_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
}
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
sun8i_vi_layer_formats,
ARRAY_SIZE(sun8i_vi_layer_formats),
formats, format_count,
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
......
......@@ -515,6 +515,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
......
......@@ -42,8 +42,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
* "f91a9dd35715 Fix unlinking resources from hash
* table." (Feb 2019) fixes the bug.
*/
static int handle;
handle++;
static atomic_t seqno = ATOMIC_INIT(0);
int handle = atomic_inc_return(&seqno);
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
......@@ -99,6 +99,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
return NULL;
bo->base.base.funcs = &virtio_gpu_gem_funcs;
bo->base.map_cached = true;
return &bo->base.base;
}
......
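In the virtgpu hunk above, the resource id path for the old-userspace workaround used a bare `static int handle; handle++;`, which is racy when two callers allocate ids concurrently; the fix switches to an atomic counter so every caller gets a unique value. A minimal sketch of the difference, with the id-allocation helper names invented for illustration:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Racy: two concurrent callers can read the same value before either writes back. */
    static u32 next_id_racy(void)
    {
        static int handle;

        handle++;
        return handle + 1;
    }

    /* Safe: atomic_inc_return() is a single atomic read-modify-write. */
    static u32 next_id_atomic(void)
    {
        static atomic_t seqno = ATOMIC_INIT(0);

        return atomic_inc_return(&seqno) + 1;
    }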
......@@ -96,6 +96,11 @@ struct drm_gem_shmem_object {
* The address are un-mapped when the count reaches zero.
*/
unsigned int vmap_use_count;
/**
* @map_cached: map object cached (instead of using writecombine).
*/
bool map_cached;
};
#define to_drm_gem_shmem_obj(obj) \
......
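The new map_cached field documented above is what the virtio hunk sets to true during object creation. A hedged sketch of how a driver might opt in from its gem create hook, assuming the standard shmem helper API (driver_create_object() itself is hypothetical):

    #include <linux/err.h>
    #include <drm/drm_gem_shmem_helper.h>

    static struct drm_gem_object *driver_create_object(struct drm_device *dev,
                                                       size_t size)
    {
        struct drm_gem_shmem_object *shmem;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
            return ERR_CAST(shmem);

        /* This device snoops CPU caches, so cached CPU mappings are fine. */
        shmem->map_cached = true;

        return &shmem->base;
    }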