Commit e81df5bc authored by Dave Airlie

Merge tag 'drm-misc-next-fixes-2021-02-25' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next tasty fixes for v5.12:
- Cherry pick of drm-misc-fixes pull:
"here's this week's PR for drm-misc-fixes. One of the patches is a memory
leak; the rest is for hardware issues."
- Fix DT bindings for the DP connector.
- Fix build error in atyfb.
- Improve error handling for dma-buf heaps.
- Make vblank timestamps more accurate by recording the timestamp to be set when signaling the fence.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/0f60a68c-d562-7266-0815-ea75ff680b17@linux.intel.com
parents 12458e35 d922d58f
@@ -26,7 +26,6 @@ properties:
dp-pwr-supply:
description: Power supply for the DP_PWR pin
maxItems: 1
port:
$ref: /schemas/graph.yaml#/properties/port
......
@@ -312,22 +312,25 @@ void __dma_fence_might_wait(void)
/**
* dma_fence_signal_locked - signal completion of a fence
* dma_fence_signal_timestamp_locked - signal completion of a fence
* @fence: the fence to signal
* @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
*
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from the unsignaled to the signaled state and not back, it will
* only be effective the first time.
* only be effective the first time. Set the timestamp provided as the fence
* signal timestamp.
*
* Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
* held.
* Unlike dma_fence_signal_timestamp(), this function must be called with
* &dma_fence.lock held.
*
* Returns 0 on success and a negative error value when @fence has been
* signalled already.
*/
int dma_fence_signal_locked(struct dma_fence *fence)
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
ktime_t timestamp)
{
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
@@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence)
/* Stash the cb_list before replacing it with the timestamp */
list_replace(&fence->cb_list, &cb_list);
fence->timestamp = ktime_get();
fence->timestamp = timestamp;
set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
@@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence)
return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
/**
* dma_fence_signal_timestamp - signal completion of a fence
* @fence: the fence to signal
* @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
*
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from the unsignaled to the signaled state and not back, it will
* only be effective the first time. Set the timestamp provided as the fence
* signal timestamp.
*
* Returns 0 on success and a negative error value when @fence has been
* signalled already.
*/
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
unsigned long flags;
int ret;
if (!fence)
return -EINVAL;
spin_lock_irqsave(fence->lock, flags);
ret = dma_fence_signal_timestamp_locked(fence, timestamp);
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);
/**
* dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from the unsignaled to the signaled state and not back, it will
* only be effective the first time.
*
* Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
* held.
*
* Returns 0 on success and a negative error value when @fence has been
* signalled already.
*/
int dma_fence_signal_locked(struct dma_fence *fence)
{
return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
@@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence)
tmp = dma_fence_begin_signalling();
spin_lock_irqsave(fence->lock, flags);
ret = dma_fence_signal_locked(fence);
ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_end_signalling(tmp);
......
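For context, a minimal usage sketch (editorial illustration only, not part of this patch; the function and variable names are hypothetical) of how a display driver could propagate a captured hardware vblank time into a fence via the new dma_fence_signal_timestamp():

/* Hypothetical driver snippet: signal a flip-completion fence with the
 * hardware vblank timestamp so fence and vblank event timestamps match.
 */
#include <linux/dma-fence.h>
#include <linux/ktime.h>

static void example_complete_flip(struct dma_fence *fence, ktime_t hw_vblank_ns)
{
	if (hw_vblank_ns)
		/* Use the captured hardware time as the fence timestamp. */
		dma_fence_signal_timestamp(fence, hw_vblank_ns);
	else
		/* No hardware timestamp available: fall back to "now". */
		dma_fence_signal(fence);

	/* Drop the reference this driver held for signalling. */
	dma_fence_put(fence);
}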
@@ -52,6 +52,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
unsigned int fd_flags,
unsigned int heap_flags)
{
struct dma_buf *dmabuf;
int fd;
/*
* Allocations from all heaps have to begin
* and end on page boundaries.
@@ -60,7 +63,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
if (!len)
return -EINVAL;
return heap->ops->allocate(heap, len, fd_flags, heap_flags);
dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
fd = dma_buf_fd(dmabuf, fd_flags);
if (fd < 0) {
dma_buf_put(dmabuf);
/* just return, as put will call release and that will free */
}
return fd;
}
static int dma_heap_open(struct inode *inode, struct file *file)
......
@@ -271,10 +271,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
.release = cma_heap_dma_buf_release,
};
static int cma_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct cma_heap_buffer *buffer;
@@ -289,7 +289,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -348,15 +348,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
/* just return, as put will call release and that will free */
return ret;
}
return ret;
return dmabuf;
free_pages:
kfree(buffer->pages);
@@ -365,7 +357,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
free_buffer:
kfree(buffer);
return ret;
return ERR_PTR(ret);
}
static const struct dma_heap_ops cma_heap_ops = {
......
@@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size,
return NULL;
}
static int system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -349,7 +349,7 @@ static int system_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -363,8 +363,10 @@ static int system_heap_allocate(struct dma_heap *heap,
* Avoid trying to allocate memory if the process
* has been killed by SIGKILL
*/
if (fatal_signal_pending(current))
if (fatal_signal_pending(current)) {
ret = -EINTR;
goto free_buffer;
}
page = alloc_largest_available(size_remaining, max_order);
if (!page)
@@ -397,14 +399,7 @@ static int system_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
/* just return, as put will call release and that will free */
return ret;
}
return ret;
return dmabuf;
free_pages:
for_each_sgtable_sg(table, sg, i) {
@@ -418,7 +413,7 @@ static int system_heap_allocate(struct dma_heap *heap,
__free_pages(page, compound_order(page));
kfree(buffer);
return ret;
return ERR_PTR(ret);
}
static const struct dma_heap_ops system_heap_ops = {
......
@@ -775,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev,
EXPORT_SYMBOL(drm_event_cancel_free);
/**
* drm_send_event_locked - send DRM event to file descriptor
* drm_send_event_helper - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
* @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
* time domain
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. Callers must already hold
* &drm_device.event_lock, see drm_send_event() for the unlocked version.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
* DRM file for this event still exists and can call this function upon
* completion of the asynchronous work unconditionally.
* This helper function sends the event @e, initialized with
* drm_event_reserve_init(), to its associated userspace DRM file.
* The timestamp variant of dma_fence_signal is used when the caller
* sends a valid timestamp.
*/
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
void drm_send_event_helper(struct drm_device *dev,
struct drm_pending_event *e, ktime_t timestamp)
{
assert_spin_locked(&dev->event_lock);
@@ -799,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
dma_fence_signal(e->fence);
if (timestamp)
dma_fence_signal_timestamp(e->fence, timestamp);
else
dma_fence_signal(e->fence);
dma_fence_put(e->fence);
}
@@ -814,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
wake_up_interruptible_poll(&e->file_priv->event_wait,
EPOLLIN | EPOLLRDNORM);
}
/**
* drm_send_event_timestamp_locked - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
* @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
* time domain
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. Callers must already hold
* &drm_device.event_lock.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
* DRM file for this event still exists and can call this function upon
* completion of the asynchronous work unconditionally.
*/
void drm_send_event_timestamp_locked(struct drm_device *dev,
struct drm_pending_event *e, ktime_t timestamp)
{
drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);
/**
* drm_send_event_locked - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. Callers must already hold
* &drm_device.event_lock, see drm_send_event() for the unlocked version.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
* DRM file for this event still exists and can call this function upon
* completion of the asynchronous work unconditionally.
*/
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
drm_send_event_helper(dev, e, 0);
}
EXPORT_SYMBOL(drm_send_event_locked);
/**
@@ -836,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
unsigned long irqflags;
spin_lock_irqsave(&dev->event_lock, irqflags);
drm_send_event_locked(dev, e);
drm_send_event_helper(dev, e, 0);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
......
@@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev,
break;
}
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
drm_send_event_locked(dev, &e->base);
/*
* Use the same timestamp for any associated fence signal to avoid
* mismatch in timestamps for vsync & fence events triggered by the
* same HW event. Frameworks like SurfaceFlinger in Android expect the
* retire-fence timestamp to match the HW vsync exactly, as they use it
* for their software vsync modeling.
*/
drm_send_event_timestamp_locked(dev, &e->base, now);
}
/**
......
@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 1;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
DRM_MODE_CONNECTOR_DSI);
......
@@ -17,9 +17,20 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
/* AFBC supports a number of configurable modes. Relevant to us is block size
* (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
* colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
* could be enabled via the hreg_block_split register, but is not currently
* handled. The colourspace transform is implicitly always assumed by the
* decoder, so consumers must use this transform as well.
*
* Failure to match modifiers will cause errors displaying AFBC buffers
* produced by conformant AFBC producers, including Mesa.
*/
#define ROCKCHIP_AFBC_MOD \
DRM_FORMAT_MOD_ARM_AFBC( \
AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
| AFBC_FORMAT_MOD_YTR \
)
enum vop_data_format {
......
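As a hedged illustration of the "producers must match this modifier" point above (userspace-side sketch, not from this patch; the libdrm call is real but the function and buffer names here are assumptions), an AFBC producer would tag its framebuffer with the same modifier the VOP advertises:

#include <stdint.h>
#include <drm_fourcc.h>
#include <xf86drmMode.h>

/* Create a single-plane XRGB8888 framebuffer carrying the AFBC modifier the
 * VOP expects: 16x16 superblocks, SPARSE layout and YTR enabled.
 */
static int example_add_afbc_fb(int drm_fd, uint32_t width, uint32_t height,
			       uint32_t bo_handle, uint32_t pitch,
			       uint32_t *fb_id)
{
	const uint64_t afbc_mod =
		DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					AFBC_FORMAT_MOD_SPARSE |
					AFBC_FORMAT_MOD_YTR);
	uint32_t handles[4] = { bo_handle };
	uint32_t pitches[4] = { pitch };
	uint32_t offsets[4] = { 0 };
	uint64_t modifiers[4] = { afbc_mod };

	return drmModeAddFB2WithModifiers(drm_fd, width, height,
					  DRM_FORMAT_XRGB8888,
					  handles, pitches, offsets, modifiers,
					  fb_id, DRM_MODE_FB_MODIFIERS);
}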
@@ -959,8 +959,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
return ret;
/* move to the bounce domain */
ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
if (ret)
if (ret) {
ttm_resource_free(bo, &hop_mem);
return ret;
}
return 0;
}
@@ -991,18 +993,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* stop and the driver will be called to make
* the second hop.
*/
bounce:
ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
if (ret)
return ret;
bounce:
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
return ret;
goto out;
/* try and move to final place now. */
goto bounce;
}
out:
if (ret)
ttm_resource_free(bo, &mem);
return ret;
......
@@ -175,6 +175,15 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
return aty_ld_le32(LCD_DATA, par);
}
}
#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) || \
	 defined(CONFIG_FB_ATY_GENERIC_LCD) */
void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
{ }
u32 aty_ld_lcd(int index, const struct atyfb_par *par)
{
return 0;
}
#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
......
@@ -399,6 +399,9 @@ void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event_timestamp_locked(struct drm_device *dev,
struct drm_pending_event *e,
ktime_t timestamp);
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
......
@@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {}
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
......
@@ -16,15 +16,15 @@ struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
* @allocate: allocate dmabuf and return fd
* @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf fd on success, -errno on error.
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
int (*allocate)(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags);
struct dma_buf *(*allocate)(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags);
};
/**
......
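To make the new dma_heap_ops contract concrete, here is a minimal sketch of a heap driver's @allocate implementation (editorial illustration only; the example_* names and helper are hypothetical, not code from this patch):

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_buffer {
	struct dma_heap *heap;
	size_t len;
};

/* Placeholder: a real heap would fill a dma_buf_export_info for this buffer
 * and call dma_buf_export() here.
 */
static struct dma_buf *example_export(struct example_buffer *buffer,
				      unsigned long fd_flags)
{
	return ERR_PTR(-ENOSYS);
}

static struct dma_buf *example_heap_allocate(struct dma_heap *heap,
					     unsigned long len,
					     unsigned long fd_flags,
					     unsigned long heap_flags)
{
	struct example_buffer *buffer;
	struct dma_buf *dmabuf;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);	/* errors are ERR_PTR(), not -errno */

	buffer->heap = heap;
	buffer->len = len;

	dmabuf = example_export(buffer, fd_flags);
	if (IS_ERR(dmabuf)) {
		kfree(buffer);
		return dmabuf;
	}

	/* No dma_buf_fd() here: dma_heap_buffer_alloc() now installs the fd
	 * and drops the dmabuf reference on failure.
	 */
	return dmabuf;
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};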