Commit a0fa7268 authored by Arnd Bergmann, committed by Doug Ledford

IB/rxe: avoid putting a large struct rxe_qp on stack

A race-condition fix added an rxe_qp structure to the stack so that
rxe_requester() could roll back its state, but the structure is large
enough to trigger the warning about a possible stack overflow:

drivers/infiniband/sw/rxe/rxe_req.c: In function 'rxe_requester':
drivers/infiniband/sw/rxe/rxe_req.c:757:1: error: the frame size of 2064 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]
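
For context, this warning fires whenever a single function's stack frame
exceeds the configured limit (CONFIG_FRAME_WARN in the kernel, enforced by
GCC's -Wframe-larger-than=). A minimal out-of-tree reproducer might look
like the sketch below; the file and identifier names are hypothetical:

/* repro.c -- build with: gcc -Wall -Wframe-larger-than=1024 -c repro.c */
struct big {
	char buf[2048];		/* bigger than the 1024-byte frame limit */
};

void consume(struct big *p);	/* external, so `b` cannot be optimized away */

void f(void)
{
	struct big b = { { 0 } };	/* ~2 KB of automatic storage */

	consume(&b);	/* GCC: "the frame size of ... is larger than 1024 bytes" */
}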

This changes the rollback function to save only the psn inside the qp,
which is the only field the rollback path accesses anyway; see the
standalone sketch of the pattern after the commit metadata below.

Fixes: 3050b998 ("IB/rxe: Fix race condition between requester and completer")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent e37a79e5
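
The change is an instance of a general pattern: when rollback needs only one
field, snapshot that field instead of copying the whole object onto the
stack. A minimal standalone sketch of the idea, with simplified, hypothetical
types (not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

struct qp_state {
	uint32_t psn;		/* the only field the rollback path restores */
	char     rest[2000];	/* stand-in for the remainder of a large struct */
};

/* Old approach: copy the entire struct, costing ~2 KB of stack. */
void save_state_old(const struct qp_state *qp, struct qp_state *rollback_qp)
{
	*rollback_qp = *qp;
}

/* New approach: save only the 4-byte psn. */
static void save_state(const struct qp_state *qp, uint32_t *rollback_psn)
{
	*rollback_psn = qp->psn;
}

static void rollback_state(struct qp_state *qp, uint32_t rollback_psn)
{
	qp->psn = rollback_psn;
}

int main(void)
{
	struct qp_state qp = { .psn = 42 };
	uint32_t rollback_psn;

	save_state(&qp, &rollback_psn);
	qp.psn++;				/* speculative update before transmit */
	rollback_state(&qp, rollback_psn);	/* transmit failed: undo it */
	printf("psn restored to %u\n", qp.psn);	/* prints: psn restored to 42 */
	return 0;
}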
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
 static void save_state(struct rxe_send_wqe *wqe,
 		       struct rxe_qp *qp,
 		       struct rxe_send_wqe *rollback_wqe,
-		       struct rxe_qp *rollback_qp)
+		       u32 *rollback_psn)
 {
 	rollback_wqe->state     = wqe->state;
 	rollback_wqe->first_psn = wqe->first_psn;
 	rollback_wqe->last_psn  = wqe->last_psn;
-	rollback_qp->req.psn    = qp->req.psn;
+	*rollback_psn           = qp->req.psn;
 }
 
 static void rollback_state(struct rxe_send_wqe *wqe,
 			   struct rxe_qp *qp,
 			   struct rxe_send_wqe *rollback_wqe,
-			   struct rxe_qp *rollback_qp)
+			   u32 rollback_psn)
 {
 	wqe->state     = rollback_wqe->state;
 	wqe->first_psn = rollback_wqe->first_psn;
 	wqe->last_psn  = rollback_wqe->last_psn;
-	qp->req.psn    = rollback_qp->req.psn;
+	qp->req.psn    = rollback_psn;
 }
 
 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,8 @@ int rxe_requester(void *arg)
 	int mtu;
 	int opcode;
 	int ret;
-	struct rxe_qp rollback_qp;
 	struct rxe_send_wqe rollback_wqe;
+	u32 rollback_psn;
 
 next_wqe:
 	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -718,7 +718,7 @@ int rxe_requester(void *arg)
 	 * rxe_xmit_packet().
 	 * Otherwise, completer might initiate an unjustified retry flow.
 	 */
-	save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
 	update_wqe_state(qp, wqe, &pkt);
 	update_wqe_psn(qp, wqe, &pkt, payload);
 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -726,7 +726,7 @@ int rxe_requester(void *arg)
 		qp->need_req_skb = 1;
 
 		kfree_skb(skb);
-		rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (ret == -EAGAIN) {
 			rxe_run_task(&qp->req.task, 1);