Commit 57d6ef46 authored by Bao D. Nguyen, committed by Martin K. Petersen

scsi: ufs: mcq: Use ufshcd_mcq_poll_cqe_lock() in MCQ mode

In preparation for adding MCQ error handler support, update the MCQ code to
use ufshcd_mcq_poll_cqe_lock() in interrupt context instead of
ufshcd_mcq_poll_cqe_nolock(). This keeps the MCQ interrupt and error handler
contexts synchronized, since both need to access the MCQ hardware from
separate contexts.
Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
Link: https://lore.kernel.org/r/6ae727ad2a4040469b8f0632b55e0577d80da11b.1685396241.git.quic_nguyenb@quicinc.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Tested-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent f1304d44
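
For context, ufshcd_mcq_poll_cqe_lock() is expected to be a thin wrapper that serializes completion-queue polling by taking a per-hardware-queue lock around the nolock poll, so the interrupt path and the upcoming error handler do not touch the same MCQ hardware concurrently. A minimal sketch of that pattern follows; the cq_lock member name and the spin_lock_irqsave() flavor are assumptions, since the wrapper body is not part of this diff:

/*
 * Sketch only: serialize CQ polling between interrupt and error handler
 * contexts. The cq_lock field and spin_lock_irqsave() choice are assumed,
 * not taken from this diff.
 */
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs, flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}

With ufshcd_mcq_poll_cqe_nolock() made static below, the core interrupt handler (ufshcd_handle_mcq_cq_events()) and the Qualcomm ESI handler (ufs_qcom_mcq_esi_handler()) both funnel through this locked entry point, as the hunks show.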
@@ -284,7 +284,7 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
 	ufshcd_compl_one_cqe(hba, tag, cqe);
 }
 
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 					 struct ufs_hw_queue *hwq)
 {
 	unsigned long completed_reqs = 0;
@@ -301,7 +301,6 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
 
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 				       struct ufs_hw_queue *hwq)
@@ -314,6 +313,7 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
 
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
...
@@ -71,8 +71,6 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
 void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-					 struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
...
@@ -6804,7 +6804,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
 			ufshcd_mcq_write_cqis(hba, events, i);
 
 		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
-			ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+			ufshcd_mcq_poll_cqe_lock(hba, hwq);
 	}
 
 	return IRQ_HANDLED;
...
@@ -1556,7 +1556,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
 	struct ufs_hw_queue *hwq = &hba->uhq[id];
 
 	ufshcd_mcq_write_cqis(hba, 0x1, id);
-	ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+	ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 	return IRQ_HANDLED;
 }
...
@@ -1243,7 +1243,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
 void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 					 struct ufs_hw_queue *hwq);
 void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
...