Commit 53173920 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/fmr_pool: Stop ib_fmr threads from contributing to load average
  IB/ipath: Fix incorrect use of sizeof on msg buffer (function argument)
  IB/ipath: Limit length checksummed in eeprom
  IB/ipath: Fix a race where s_last is updated without lock held
  IB/mlx4: Lock SQ lock in mlx4_ib_post_send()
  IPoIB/cm: Fix receive QP cleanup
parents e403149c 3f776e8a
@@ -291,10 +291,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 	atomic_set(&pool->flush_ser, 0);
 	init_waitqueue_head(&pool->force_wait);
 
-	pool->thread = kthread_create(ib_fmr_cleanup_thread,
-				      pool,
-				      "ib_fmr(%s)",
-				      device->name);
+	pool->thread = kthread_run(ib_fmr_cleanup_thread,
+				   pool,
+				   "ib_fmr(%s)",
+				   device->name);
 	if (IS_ERR(pool->thread)) {
 		printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
 		ret = PTR_ERR(pool->thread);
......
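Only the thread-creation call is visible in the hunk above; the sleep-state changes in the cleanup loop that the "Stop ib_fmr threads from contributing to load average" summary refers to are presumably in the elided part of the diff. For context, kthread_run() in <linux/kthread.h> is a thin wrapper that creates the kernel thread and wakes it immediately; roughly (a sketch of the stock helper, not part of this patch):

    /* Sketch of the kthread_run() convenience macro, shown for context only:
     * create the thread, then wake it at once so it starts running. */
    #define kthread_run(threadfn, data, namefmt, ...)                          \
    ({                                                                         \
            struct task_struct *__k                                            \
                    = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
            if (!IS_ERR(__k))                                                  \
                    wake_up_process(__k);                                      \
            __k;                                                               \
    })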
@@ -538,7 +538,15 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust)
 	u8 *ip = (u8 *) ifp;
 	u8 csum = 0, len;
 
-	for (len = 0; len < ifp->if_length; len++)
+	/*
+	 * Limit length checksummed to max length of actual data.
+	 * Checksum of erased eeprom will still be bad, but we avoid
+	 * reading past the end of the buffer we were passed.
+	 */
+	len = ifp->if_length;
+	if (len > sizeof(struct ipath_flash))
+		len = sizeof(struct ipath_flash);
+	while (len--)
 		csum += *ip++;
 	csum -= ifp->if_csum;
 	csum = ~csum;
......
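The flash_csum() change clamps the length read from the flash header before summing, so a corrupt or erased if_length field can no longer make the loop read past the end of the ipath_flash buffer. A minimal standalone sketch of the same bounded-checksum pattern (hypothetical helper, not the driver code):

    #include <stddef.h>
    #include <stdint.h>

    /* Sum at most buf_size bytes, even if the claimed length taken from an
     * untrusted header is larger. */
    static uint8_t bounded_csum(const uint8_t *buf, size_t claimed_len, size_t buf_size)
    {
            size_t len = claimed_len < buf_size ? claimed_len : buf_size;
            uint8_t csum = 0;

            while (len--)
                    csum += *buf++;
            return csum;
    }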
@@ -453,7 +453,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 }
 
 static void handle_supp_msgs(struct ipath_devdata *dd,
-			     unsigned supp_msgs, char msg[512])
+			     unsigned supp_msgs, char *msg, int msgsz)
 {
 	/*
 	 * Print the message unless it's ibc status change only, which
@@ -461,9 +461,9 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 	 */
 	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
 		int iserr;
-		iserr = ipath_decode_err(msg, sizeof msg,
-					 dd->ipath_lasterror &
-					 ~INFINIPATH_E_IBSTATUSCHANGED);
+		iserr = ipath_decode_err(msg, msgsz,
+					 dd->ipath_lasterror &
+					 ~INFINIPATH_E_IBSTATUSCHANGED);
 		if (dd->ipath_lasterror &
 		    ~(INFINIPATH_E_RRCVEGRFULL |
 		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
@@ -492,8 +492,8 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 }
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
-				       ipath_err_t errs, char msg[512],
-				       int *noprint)
+				       ipath_err_t errs, char *msg,
+				       int msgsz, int *noprint)
 {
 	unsigned long nc;
 	static unsigned long nextmsg_time;
@@ -512,7 +512,7 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 		nextmsg_time = nc + HZ * 3;
 	}
 	else if (supp_msgs) {
-		handle_supp_msgs(dd, supp_msgs, msg);
+		handle_supp_msgs(dd, supp_msgs, msg, msgsz);
 		supp_msgs = 0;
 		nmsgs = 0;
 	}
@@ -525,14 +525,14 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
-	char msg[512];
+	char msg[128];
 	u64 ignore_this_time = 0;
 	int i, iserr = 0;
 	int chkerrpkts = 0, noprint = 0;
 	unsigned supp_msgs;
 	int log_idx;
 
-	supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
+	supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
 
 	/* don't report errors that are masked */
 	errs &= ~dd->ipath_maskederrs;
......
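The ipath_intr.c hunks fix the C pitfall named in the summary: a parameter declared as char msg[512] is really just a char *, so sizeof msg inside handle_supp_msgs() evaluated to the size of a pointer rather than 512, and ipath_decode_err() was told the buffer was only a few bytes long. The fix passes an explicit msgsz down the call chain (and shrinks the on-stack buffer to 128 bytes). A small standalone illustration of the pitfall (not driver code):

    #include <stdio.h>

    /* The array-typed parameter decays to char *, so sizeof reports pointer size. */
    static void takes_buf(char msg[512])
    {
            printf("in callee:     %zu\n", sizeof msg);   /* pointer size, e.g. 8 */
    }

    int main(void)
    {
            char msg[512];

            printf("at definition: %zu\n", sizeof msg);   /* 512 */
            takes_buf(msg);
            return 0;
    }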
@@ -630,11 +630,8 @@ bail:;
 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
 			 enum ib_wc_status status)
 {
-	u32 last = qp->s_last;
-
-	if (++last == qp->s_size)
-		last = 0;
-	qp->s_last = last;
+	unsigned long flags;
+	u32 last;
 
 	/* See ch. 11.2.4.1 and 10.7.3.1 */
 	if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
@@ -658,4 +655,11 @@ void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
 		wc.port_num = 0;
 		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
 	}
+
+	spin_lock_irqsave(&qp->s_lock, flags);
+	last = qp->s_last;
+	if (++last >= qp->s_size)
+		last = 0;
+	qp->s_last = last;
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
@@ -1282,7 +1282,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	int size;
 	int i;
 
-	spin_lock_irqsave(&qp->rq.lock, flags);
+	spin_lock_irqsave(&qp->sq.lock, flags);
 
 	ind = qp->sq.head;
@@ -1448,7 +1448,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			       (qp->sq.wqe_cnt - 1));
 	}
 
-	spin_unlock_irqrestore(&qp->rq.lock, flags);
+	spin_unlock_irqrestore(&qp->sq.lock, flags);
 
 	return err;
 }
......
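The mlx4 change is a lock mix-up: mlx4_ib_post_send() operates on the send queue but was taking and releasing qp->rq.lock, the receive queue's lock. Both ends of the critical section now use the send queue's own lock, keeping the lock/unlock pair symmetric; in skeleton form (illustrative only, details elided):

    static int post_send_skeleton(struct mlx4_ib_qp *qp)
    {
            unsigned long flags;
            int err = 0;

            spin_lock_irqsave(&qp->sq.lock, flags);     /* serialize SQ producers */
            /* ... build and post work queue entries starting at qp->sq.head ... */
            spin_unlock_irqrestore(&qp->sq.lock, flags);

            return err;
    }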
@@ -60,7 +60,7 @@ static struct ib_qp_attr ipoib_cm_err_attr = {
 	.qp_state = IB_QPS_ERR
 };
 
-#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff
+#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
 
 static struct ib_send_wr ipoib_cm_rx_drain_wr = {
 	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
......