Commit 56acb1b6 authored by Dmitry Baryshkov

drm/msm/dpu: make the irq table size static

The size of the irq table is static, it has MDP_INTR_MAX * 32 interrupt
entries. Provide the fixed length and drop struct_size() statement.
Reviewed-by: Marijn Suijten <marijn.suijten@somainline.org>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Patchwork: https://patchwork.freedesktop.org/patch/550927/
Link: https://lore.kernel.org/r/20230802100426.4184892-6-dmitry.baryshkov@linaro.org
parent ea4842ed
...@@ -200,10 +200,9 @@ static const struct dpu_intr_reg dpu_intr_set_7xxx[] = { ...@@ -200,10 +200,9 @@ static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
#define DPU_IRQ_REG(irq_idx) (irq_idx / 32) #define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32)) #define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
static inline bool dpu_core_irq_is_valid(struct dpu_hw_intr *intr, static inline bool dpu_core_irq_is_valid(int irq_idx)
int irq_idx)
{ {
return irq_idx >= 0 && irq_idx < intr->total_irqs; return irq_idx >= 0 && irq_idx < DPU_NUM_IRQS;
} }
static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr, static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
...@@ -305,7 +304,7 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) ...@@ -305,7 +304,7 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
if (!intr) if (!intr)
return -EINVAL; return -EINVAL;
if (!dpu_core_irq_is_valid(intr, irq_idx)) { if (!dpu_core_irq_is_valid(irq_idx)) {
pr_err("invalid IRQ index: [%d]\n", irq_idx); pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL; return -EINVAL;
} }
...@@ -358,7 +357,7 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) ...@@ -358,7 +357,7 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
if (!intr) if (!intr)
return -EINVAL; return -EINVAL;
if (!dpu_core_irq_is_valid(intr, irq_idx)) { if (!dpu_core_irq_is_valid(irq_idx)) {
pr_err("invalid IRQ index: [%d]\n", irq_idx); pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL; return -EINVAL;
} }
...@@ -443,7 +442,7 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx) ...@@ -443,7 +442,7 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
if (!intr) if (!intr)
return 0; return 0;
if (!dpu_core_irq_is_valid(intr, irq_idx)) { if (!dpu_core_irq_is_valid(irq_idx)) {
pr_err("invalid IRQ index: [%d]\n", irq_idx); pr_err("invalid IRQ index: [%d]\n", irq_idx);
return 0; return 0;
} }
...@@ -470,13 +469,12 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, ...@@ -470,13 +469,12 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
const struct dpu_mdss_cfg *m) const struct dpu_mdss_cfg *m)
{ {
struct dpu_hw_intr *intr; struct dpu_hw_intr *intr;
int nirq = MDP_INTR_MAX * 32;
unsigned int i; unsigned int i;
if (!addr || !m) if (!addr || !m)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL); intr = kzalloc(sizeof(*intr), GFP_KERNEL);
if (!intr) if (!intr)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -487,8 +485,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, ...@@ -487,8 +485,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
intr->hw.blk_addr = addr + m->mdp[0].base; intr->hw.blk_addr = addr + m->mdp[0].base;
intr->total_irqs = nirq;
intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) | intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
BIT(MDP_SSPP_TOP0_INTR2) | BIT(MDP_SSPP_TOP0_INTR2) |
BIT(MDP_SSPP_TOP0_HIST_INTR); BIT(MDP_SSPP_TOP0_HIST_INTR);
...@@ -527,7 +523,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, ...@@ -527,7 +523,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
return -EINVAL; return -EINVAL;
} }
if (!dpu_core_irq_is_valid(dpu_kms->hw_intr, irq_idx)) { if (!dpu_core_irq_is_valid(irq_idx)) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL; return -EINVAL;
} }
...@@ -566,7 +562,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx) ...@@ -566,7 +562,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
unsigned long irq_flags; unsigned long irq_flags;
int ret; int ret;
if (!dpu_core_irq_is_valid(dpu_kms->hw_intr, irq_idx)) { if (!dpu_core_irq_is_valid(irq_idx)) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL; return -EINVAL;
} }
...@@ -601,7 +597,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) ...@@ -601,7 +597,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
int i, irq_count; int i, irq_count;
void *cb; void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { for (i = 0; i < DPU_NUM_IRQS; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i); irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
irq_count = atomic_read(&irq_entry->count); irq_count = atomic_read(&irq_entry->count);
...@@ -636,7 +632,7 @@ void dpu_core_irq_preinstall(struct msm_kms *kms) ...@@ -636,7 +632,7 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
dpu_disable_all_irqs(dpu_kms); dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev); pm_runtime_put_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { for (i = 0; i < DPU_NUM_IRQS; i++) {
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i); irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
atomic_set(&irq_entry->count, 0); atomic_set(&irq_entry->count, 0);
} }
...@@ -652,7 +648,7 @@ void dpu_core_irq_uninstall(struct msm_kms *kms) ...@@ -652,7 +648,7 @@ void dpu_core_irq_uninstall(struct msm_kms *kms)
return; return;
pm_runtime_get_sync(&dpu_kms->pdev->dev); pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { for (i = 0; i < DPU_NUM_IRQS; i++) {
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i); irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
if (irq_entry->cb) if (irq_entry->cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i); DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
......
...@@ -38,6 +38,8 @@ enum dpu_hw_intr_reg { ...@@ -38,6 +38,8 @@ enum dpu_hw_intr_reg {
#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset) #define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
#define DPU_NUM_IRQS (MDP_INTR_MAX * 32)
struct dpu_hw_intr_entry { struct dpu_hw_intr_entry {
void (*cb)(void *arg); void (*cb)(void *arg);
void *arg; void *arg;
...@@ -50,7 +52,6 @@ struct dpu_hw_intr_entry { ...@@ -50,7 +52,6 @@ struct dpu_hw_intr_entry {
* @ops: function pointer mapping for IRQ handling * @ops: function pointer mapping for IRQ handling
* @cache_irq_mask: array of IRQ enable masks reg storage created during init * @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init * @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources * @irq_lock: spinlock for accessing IRQ resources
* @irq_cb_tbl: array of IRQ callbacks * @irq_cb_tbl: array of IRQ callbacks
*/ */
...@@ -58,12 +59,11 @@ struct dpu_hw_intr { ...@@ -58,12 +59,11 @@ struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw; struct dpu_hw_blk_reg_map hw;
u32 cache_irq_mask[MDP_INTR_MAX]; u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status; u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock; spinlock_t irq_lock;
unsigned long irq_mask; unsigned long irq_mask;
const struct dpu_intr_reg *intr_set; const struct dpu_intr_reg *intr_set;
struct dpu_hw_intr_entry irq_tbl[]; struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS];
}; };
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment