Commit 2771e9ed authored by Hoang-Nam Nguyen's avatar Hoang-Nam Nguyen Committed by Roland Dreier

IB/ehca: Use WQE offset instead of WQE addr for pending work reqs

This is a patch for ehca to fix a bug in prepare_sqe_rts(), which
used the WQE address to iterate over pending work requests.  This might
cause an access violation, since the queue pages cannot be assumed to
follow each other consecutively.  This patch therefore introduces a few
queue functions to determine a WQE's offset based on its address, and
uses the WQE offset to iterate over the pending work requests.
Signed-off-by: default avatarHoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: default avatarRoland Dreier <rolandd@cisco.com>
parent 9ab1ffa8
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION("SVNEHCA_0018"); MODULE_VERSION("SVNEHCA_0019");
int ehca_open_aqp1 = 0; int ehca_open_aqp1 = 0;
int ehca_debug_level = 0; int ehca_debug_level = 0;
...@@ -790,7 +790,7 @@ int __init ehca_module_init(void) ...@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
int ret; int ret;
printk(KERN_INFO "eHCA Infiniband Device Driver " printk(KERN_INFO "eHCA Infiniband Device Driver "
"(Rel.: SVNEHCA_0018)\n"); "(Rel.: SVNEHCA_0019)\n");
idr_init(&ehca_qp_idr); idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr); idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock); spin_lock_init(&ehca_qp_idr_lock);
......
...@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, ...@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
u64 h_ret; u64 h_ret;
struct ipz_queue *squeue; struct ipz_queue *squeue;
void *bad_send_wqe_p, *bad_send_wqe_v; void *bad_send_wqe_p, *bad_send_wqe_v;
void *squeue_start_p, *squeue_end_p; u64 q_ofs;
void *squeue_start_v, *squeue_end_v;
struct ehca_wqe *wqe; struct ehca_wqe *wqe;
int qp_num = my_qp->ib_qp.qp_num; int qp_num = my_qp->ib_qp.qp_num;
...@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, ...@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
if (ehca_debug_level) if (ehca_debug_level)
ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
squeue = &my_qp->ipz_squeue; squeue = &my_qp->ipz_squeue;
squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L)); if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
squeue_end_p = squeue_start_p+squeue->queue_length; ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
squeue_start_v = abs_to_virt((u64)squeue_start_p); " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
squeue_end_v = abs_to_virt((u64)squeue_end_p); return -EFAULT;
ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p", }
qp_num, squeue_start_v, squeue_end_v);
/* loop sets wqe's purge bit */ /* loop sets wqe's purge bit */
wqe = (struct ehca_wqe*)bad_send_wqe_v; wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0; *bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) { while (wqe->optype != 0xff && wqe->wqef != 0xff) {
if (ehca_debug_level) if (ehca_debug_level)
ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
wqe->nr_of_data_seg = 0; /* suppress data access */ wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */ wqe->wqef = WQEF_PURGE; /* WQE to be purged */
wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size); q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = (*bad_wqe_cnt)+1; *bad_wqe_cnt = (*bad_wqe_cnt)+1;
if ((void*)wqe >= squeue_end_v) {
wqe = squeue_start_v;
}
} }
/* /*
* bad wqe will be reprocessed and ignored when pol_cq() is called, * bad wqe will be reprocessed and ignored when pol_cq() is called,
......
...@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue) ...@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
return ret; return ret;
} }
/*
 * Translate an absolute (physical) address into the logical queue offset
 * of the entry it points at.  The queue's pages are not guaranteed to be
 * physically contiguous, so each page is checked individually.
 * Returns 0 on success with *q_offset set, or -EINVAL if addr does not
 * fall inside any page of the queue.
 */
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
{
	u64 page_idx;
	u64 nr_pages = queue->queue_length / queue->pagesize;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		u64 page_abs = (u64)virt_to_abs(queue->queue_pages[page_idx]);
		/* unsigned subtraction: wraps to a huge value when addr < page_abs,
		 * so a single compare covers both range bounds */
		u64 rel = addr - page_abs;

		if (rel < queue->pagesize) {
			*q_offset = rel + page_idx * queue->pagesize;
			return 0;
		}
	}
	return -EINVAL;
}
int ipz_queue_ctor(struct ipz_queue *queue, int ipz_queue_ctor(struct ipz_queue *queue,
const u32 nr_of_pages, const u32 nr_of_pages,
const u32 pagesize, const u32 qe_size, const u32 nr_of_sg) const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
......
...@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue) ...@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue)
return ipz_qeit_get(queue); return ipz_qeit_get(queue);
} }
/*
* return the q_offset corresponding to an absolute address
*/
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
/*
 * Return the offset of the next queue entry after @offset, wrapping back
 * to 0 at the end of the queue.  The queue itself is not modified.
 */
static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
{
	u64 next = offset + queue->qe_size;

	return (next >= queue->queue_length) ? 0 : next;
}
/* struct generic page table */ /* struct generic page table */
struct ipz_pt { struct ipz_pt {
u64 entries[EHCA_PT_ENTRIES]; u64 entries[EHCA_PT_ENTRIES];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment