Commit 10be83cc authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman

Merge tag 'drm-fixes-2018-10-04' of git://anongit.freedesktop.org/drm/drm

Dave writes:
  "drm exynos, tda9950 and intel fixes

   3 i915 fixes:
     compressed error handling zlib fix
     compiler warning cleanup
     and a minor code cleanup

   2 tda9950:
     Two fixes for the HDMI CEC

   1 exynos:
     A fix required for IOMMU interaction."

* tag 'drm-fixes-2018-10-04' of git://anongit.freedesktop.org/drm/drm:
  drm/i915: Handle incomplete Z_FINISH for compressed error states
  drm/i915: Avoid compiler warning for maybe unused gu_misc_iir
  drm/i915: Do not redefine the has_csr parameter.
  drm/exynos: Use selected dma_dev default iommu domain instead of a fake one
  drm/i2c: tda9950: set MAX_RETRIES for errors only
  drm/i2c: tda9950: fix timeout counter check
parents 1b0350c3 d8938c98
...@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv, ...@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv, static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
unsigned long start, unsigned long size) unsigned long start, unsigned long size)
{ {
struct iommu_domain *domain; priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
int ret;
domain = iommu_domain_alloc(priv->dma_dev->bus);
if (!domain)
return -ENOMEM;
ret = iommu_get_dma_cookie(domain);
if (ret)
goto free_domain;
ret = iommu_dma_init_domain(domain, start, size, NULL);
if (ret)
goto put_cookie;
priv->mapping = domain;
return 0; return 0;
put_cookie:
iommu_put_dma_cookie(domain);
free_domain:
iommu_domain_free(domain);
return ret;
} }
static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv) static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
{ {
struct iommu_domain *domain = priv->mapping;
iommu_put_dma_cookie(domain);
iommu_domain_free(domain);
priv->mapping = NULL; priv->mapping = NULL;
} }
...@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv, ...@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
{ {
struct iommu_domain *domain = priv->mapping; struct iommu_domain *domain = priv->mapping;
return iommu_attach_device(domain, dev); if (dev != priv->dma_dev)
return iommu_attach_device(domain, dev);
return 0;
} }
static inline void __exynos_iommu_detach(struct exynos_drm_private *priv, static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
...@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv, ...@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
{ {
struct iommu_domain *domain = priv->mapping; struct iommu_domain *domain = priv->mapping;
iommu_detach_device(domain, dev); if (dev != priv->dma_dev)
iommu_detach_device(domain, dev);
} }
#else #else
#error Unsupported architecture and IOMMU/DMA-mapping glue code #error Unsupported architecture and IOMMU/DMA-mapping glue code
......
...@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data) ...@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
break; break;
} }
/* TDA9950 executes all retries for us */ /* TDA9950 executes all retries for us */
tx_status |= CEC_TX_STATUS_MAX_RETRIES; if (tx_status != CEC_TX_STATUS_OK)
tx_status |= CEC_TX_STATUS_MAX_RETRIES;
cec_transmit_done(priv->adap, tx_status, arb_lost_cnt, cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
nack_cnt, 0, err_cnt); nack_cnt, 0, err_cnt);
break; break;
...@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv) ...@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
/* Wait up to .5s for it to signal non-busy */ /* Wait up to .5s for it to signal non-busy */
do { do {
csr = tda9950_read(client, REG_CSR); csr = tda9950_read(client, REG_CSR);
if (!(csr & CSR_BUSY) || --timeout) if (!(csr & CSR_BUSY) || !--timeout)
break; break;
msleep(10); msleep(10);
} while (1); } while (1);
......
...@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c) ...@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
return true; return true;
} }
static void *compress_next_page(struct drm_i915_error_object *dst)
{
unsigned long page;
if (dst->page_count >= dst->num_pages)
return ERR_PTR(-ENOSPC);
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
return ERR_PTR(-ENOMEM);
return dst->pages[dst->page_count++] = (void *)page;
}
static int compress_page(struct compress *c, static int compress_page(struct compress *c,
void *src, void *src,
struct drm_i915_error_object *dst) struct drm_i915_error_object *dst)
...@@ -245,19 +259,14 @@ static int compress_page(struct compress *c, ...@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
do { do {
if (zstream->avail_out == 0) { if (zstream->avail_out == 0) {
unsigned long page; zstream->next_out = compress_next_page(dst);
if (IS_ERR(zstream->next_out))
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); return PTR_ERR(zstream->next_out);
if (!page)
return -ENOMEM;
dst->pages[dst->page_count++] = (void *)page;
zstream->next_out = (void *)page;
zstream->avail_out = PAGE_SIZE; zstream->avail_out = PAGE_SIZE;
} }
if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK) if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO; return -EIO;
} while (zstream->avail_in); } while (zstream->avail_in);
...@@ -268,19 +277,42 @@ static int compress_page(struct compress *c, ...@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
return 0; return 0;
} }
static void compress_fini(struct compress *c, static int compress_flush(struct compress *c,
struct drm_i915_error_object *dst) struct drm_i915_error_object *dst)
{ {
struct z_stream_s *zstream = &c->zstream; struct z_stream_s *zstream = &c->zstream;
if (dst) { do {
zlib_deflate(zstream, Z_FINISH); switch (zlib_deflate(zstream, Z_FINISH)) {
dst->unused = zstream->avail_out; case Z_OK: /* more space requested */
} zstream->next_out = compress_next_page(dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
zstream->avail_out = PAGE_SIZE;
break;
case Z_STREAM_END:
goto end;
default: /* any error */
return -EIO;
}
} while (1);
end:
memset(zstream->next_out, 0, zstream->avail_out);
dst->unused = zstream->avail_out;
return 0;
}
static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
struct z_stream_s *zstream = &c->zstream;
zlib_deflateEnd(zstream); zlib_deflateEnd(zstream);
kfree(zstream->workspace); kfree(zstream->workspace);
if (c->tmp) if (c->tmp)
free_page((unsigned long)c->tmp); free_page((unsigned long)c->tmp);
} }
...@@ -319,6 +351,12 @@ static int compress_page(struct compress *c, ...@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
return 0; return 0;
} }
static int compress_flush(struct compress *c,
struct drm_i915_error_object *dst)
{
return 0;
}
static void compress_fini(struct compress *c, static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst) struct drm_i915_error_object *dst)
{ {
...@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915, ...@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
unsigned long num_pages; unsigned long num_pages;
struct sgt_iter iter; struct sgt_iter iter;
dma_addr_t dma; dma_addr_t dma;
int ret;
if (!vma) if (!vma)
return NULL; return NULL;
...@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915, ...@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start; dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size; dst->gtt_size = vma->node.size;
dst->num_pages = num_pages;
dst->page_count = 0; dst->page_count = 0;
dst->unused = 0; dst->unused = 0;
...@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915, ...@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
return NULL; return NULL;
} }
ret = -EINVAL;
for_each_sgt_dma(dma, iter, vma->pages) { for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s; void __iomem *s;
int ret;
ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst); ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s); io_mapping_unmap_atomic(s);
if (ret) if (ret)
goto unwind; break;
} }
goto out;
unwind: if (ret || compress_flush(&compress, dst)) {
while (dst->page_count--) while (dst->page_count--)
free_page((unsigned long)dst->pages[dst->page_count]); free_page((unsigned long)dst->pages[dst->page_count]);
kfree(dst); kfree(dst);
dst = NULL; dst = NULL;
}
out:
compress_fini(&compress, dst); compress_fini(&compress, dst);
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst; return dst;
......
...@@ -135,6 +135,7 @@ struct i915_gpu_state { ...@@ -135,6 +135,7 @@ struct i915_gpu_state {
struct drm_i915_error_object { struct drm_i915_error_object {
u64 gtt_offset; u64 gtt_offset;
u64 gtt_size; u64 gtt_size;
int num_pages;
int page_count; int page_count;
int unused; int unused;
u32 *pages[0]; u32 *pages[0];
......
...@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915, ...@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock); spin_unlock(&i915->irq_lock);
} }
static void static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
u32 *iir)
{ {
void __iomem * const regs = dev_priv->regs; void __iomem * const regs = dev_priv->regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ)) if (!(master_ctl & GEN11_GU_MISC_IRQ))
return; return 0;
iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
if (likely(iir))
raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); return iir;
if (likely(*iir))
raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
} }
static void static void
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
const u32 master_ctl, const u32 iir)
{ {
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return;
if (unlikely(!iir)) {
DRM_ERROR("GU_MISC iir blank!\n");
return;
}
if (iir & GEN11_GU_MISC_GSE) if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(dev_priv); intel_opregion_asle_intr(dev_priv);
else
DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
} }
static irqreturn_t gen11_irq_handler(int irq, void *arg) static irqreturn_t gen11_irq_handler(int irq, void *arg)
...@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) ...@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915); enable_rpm_wakeref_asserts(i915);
} }
gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
/* Acknowledge and enable interrupts. */ /* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); gen11_gu_misc_irq_handler(i915, gu_misc_iir);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = { ...@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES, \ GEN10_FEATURES, \
GEN(11), \ GEN(11), \
.ddb_size = 2048, \ .ddb_size = 2048, \
.has_csr = 0, \
.has_logical_ring_elsq = 1 .has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = { static const struct intel_device_info intel_icelake_11_info = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment