/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * The table is indexed by the 5-bit RNR NAK timer code; e.g. code 0x07
 * maps to 120 microseconds (0.12 ms).
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02:    .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
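	/*
	 * Unwind: drop the MR references already taken, then flush the
	 * RWQE with a local protection error completion.
	 */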
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
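	/*
	 * If this is a SRQ with an armed limit, generate the SRQ limit
	 * event once the number of remaining RWQEs drops below it.
	 */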
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * qib_ruc_check_hdr - validate the header of an incoming packet against
 * the QP's primary or alternate path (GRH use, GIDs, PKEY, SLID and port)
 * and arm or perform path migration as required.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 *
 * Return 0 if the header is valid, 1 otherwise.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct rvt_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	rcu_read_lock();
	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
	if (!qp)
		goto done;

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

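	/*
	 * The destination QP checks passed; execute the WQE locally and
	 * build the receive side completion as we go.
	 */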
	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
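		/*
		 * For a read, the data flows from the responder's memory
		 * (just validated against the RKEY) into the requester's
		 * own SGE list; those MR references are dropped later in
		 * qib_send_complete(), so don't release them during the
		 * copy below.
		 */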
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		qib_put_ss(&qp->r_sge);

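	/*
	 * Only generate a receive completion if a RWQE was consumed
	 * (plain RDMA writes and RDMA reads do not consume one).
	 */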
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= RVT_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

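/**
 * qib_make_ruc_header - construct the LRH (and GRH if needed) and BTH
 * @qp: the QP
 * @ohdr: a pointer to the BTH being built
 * @bth0: the partially built first BTH dword; the PKEY, pad count and
 *	migration request bit are added here
 * @bth2: the value for the third BTH dword (the PSN word)
 */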
void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	priv->s_hdr->lrh[2] =
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}

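/*
 * Work queue wrapper: resolve the owning QP and hand it to qib_do_send().
 */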
void _qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;

	qib_do_send(qp);
}

/**
 * qib_do_send - perform a send on a QP
 * @qp: pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
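	/* Datagram WQEs (UD/SMI/GSI) hold a reference on the AH; drop it. */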
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	/*
	 * See ch. 11.2.4.1 and 10.7.3.1: generate a completion only if the
	 * WQE was signaled, the QP signals all WQEs, or an error occurred.
	 */
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}