Commit 1df9fad1 authored by Roland Dreier

Merge branches 'cma', 'cxgb4' and 'qib' into for-next

@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	return 0;
 }
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 	PDBG("%s ep %p\n", __func__, ep);
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	return 0;
 }
@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	int closing = 0;
+	int abort = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		 * in rdma connection migration (see c4iw_accept_cr()).
 		 */
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case FPDU_MODE:
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
-		closing = 1;
+		attrs.next_state = C4IW_QP_STATE_CLOSING;
+		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 		peer_close_upcall(ep);
+		disconnect = 1;
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		BUG_ON(1);
 	}
 	mutex_unlock(&ep->com.mutex);
-	if (closing) {
-		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-	}
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
 	 */
-	ep->com.wr_wait.done = 1;
-	ep->com.wr_wait.ret = -ECONNRESET;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_tid(t, tid);
 	BUG_ON(!ep);
-	if (ep->com.qp) {
+	if (ep && ep->com.qp) {
 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
 		       ep->com.qp->wq.sq.qid);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
-		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
 	return 0;
 }
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			if (ret)
-				wr_waitp->ret = -ret;
-			else
-				wr_waitp->ret = 0;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
+		if (wr_waitp)
+			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
 	case 2:
......
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
-static LIST_HEAD(dev_list);
+static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 static struct dentry *c4iw_debugfs_root;
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }
-static void c4iw_remove(struct c4iw_dev *dev)
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
+static void c4iw_remove(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	list_del(&dev->entry);
-	if (dev->registered)
-		c4iw_unregister_device(dev);
-	c4iw_rdev_close(&dev->rdev);
-	idr_destroy(&dev->cqidr);
-	idr_destroy(&dev->qpidr);
-	idr_destroy(&dev->mmidr);
-	iounmap(dev->rdev.oc_mw_kva);
-	ib_dealloc_device(&dev->ibdev);
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_rdev_close(&ctx->dev->rdev);
+	idr_destroy(&ctx->dev->cqidr);
+	idr_destroy(&ctx->dev->qpidr);
+	idr_destroy(&ctx->dev->mmidr);
+	iounmap(ctx->dev->rdev.oc_mw_kva);
+	ib_dealloc_device(&ctx->dev->ibdev);
+	ctx->dev = NULL;
 }
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;
@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
 					  devp->rdev.lldi.vr->ocq.size);
-	printk(KERN_INFO MOD "ocq memory: "
+	PDBG(KERN_INFO MOD "ocq memory: "
 	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
 	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
 	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
-	mutex_lock(&dev_mutex);
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
 		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 	idr_init(&devp->cqidr);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
-	list_add_tail(&devp->entry, &dev_list);
-	mutex_unlock(&dev_mutex);
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 {
-	struct c4iw_dev *dev;
+	struct uld_ctx *ctx;
 	static int vers_printed;
 	int i;
@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
 		       DRV_VERSION);
-	dev = c4iw_alloc(infop);
-	if (!dev)
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
 		goto out;
+	}
+	ctx->lldi = *infop;
 	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-	     __func__, pci_name(dev->rdev.lldi.pdev),
-	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
-	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+	     __func__, pci_name(ctx->lldi.pdev),
+	     ctx->lldi.nchan, ctx->lldi.nrxq,
+	     ctx->lldi.ntxq, ctx->lldi.nports);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
-	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
-		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+	for (i = 0; i < ctx->lldi.nrxq; i++)
+		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
 out:
-	return dev;
+	return ctx;
 }
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
+	struct c4iw_dev *dev = ctx->dev;
 	struct sk_buff *skb;
 	const struct cpl_act_establish *rpl;
 	unsigned int opcode;
@@ -503,47 +512,49 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
-		if (!dev->registered) {
-			int ret;
-			ret = c4iw_register_device(dev);
-			if (ret)
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		if (!ctx->dev) {
+			int ret = 0;
+			ctx->dev = c4iw_alloc(&ctx->lldi);
+			if (!IS_ERR(ctx->dev))
+				ret = c4iw_register_device(ctx->dev);
+			if (IS_ERR(ctx->dev) || ret)
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
-				       pci_name(dev->rdev.lldi.pdev), ret);
+				       pci_name(ctx->lldi.pdev), ret);
 		}
 		break;
 	case CXGB4_STATE_DOWN:
 		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		dev->rdev.flags |= T4_FATAL_ERROR;
-		if (dev->registered) {
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev) {
 			struct ib_event event;
+			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
 			memset(&event, 0, sizeof event);
 			event.event = IB_EVENT_DEVICE_FATAL;
-			event.device = &dev->ibdev;
+			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
-			c4iw_unregister_device(dev);
+			c4iw_remove(ctx);
 		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		mutex_lock(&dev_mutex);
-		c4iw_remove(dev);
-		mutex_unlock(&dev_mutex);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	}
 	return 0;
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void)
 static void __exit c4iw_exit_module(void)
 {
-	struct c4iw_dev *dev, *tmp;
+	struct uld_ctx *ctx, *tmp;
 	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
-		c4iw_remove(dev);
+	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
+		if (ctx->dev)
+			c4iw_remove(ctx);
+		kfree(ctx);
 	}
 	mutex_unlock(&dev_mutex);
 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
......
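Aside: the hunks above also switch c4iw_alloc() from returning NULL on failure to the kernel's ERR_PTR() convention, which is what lets c4iw_uld_state_change() check IS_ERR(ctx->dev) and report a meaningful errno. A minimal sketch of that convention follows; foo, foo_alloc() and foo_setup() are hypothetical stand-ins, not driver functions.

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int val;
};

/* Encode the errno in the returned pointer instead of returning NULL,
 * so the caller learns *why* the allocation path failed. */
static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);
	return f;
}

static int foo_setup(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* e.g. -ENOMEM */
	kfree(f);
	return 0;
}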
...@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) ...@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
#define C4IW_WR_TO (10*HZ) #define C4IW_WR_TO (10*HZ)
enum {
REPLY_READY = 0,
};
struct c4iw_wr_wait { struct c4iw_wr_wait {
wait_queue_head_t wait; wait_queue_head_t wait;
int done; unsigned long status;
int ret; int ret;
}; };
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{ {
wr_waitp->ret = 0; wr_waitp->ret = 0;
wr_waitp->done = 0; wr_waitp->status = 0;
init_waitqueue_head(&wr_waitp->wait); init_waitqueue_head(&wr_waitp->wait);
} }
static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
wr_waitp->ret = ret;
set_bit(REPLY_READY, &wr_waitp->status);
wake_up(&wr_waitp->wait);
}
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
struct c4iw_wr_wait *wr_waitp, struct c4iw_wr_wait *wr_waitp,
u32 hwtid, u32 qpid, u32 hwtid, u32 qpid,
const char *func) const char *func)
{ {
unsigned to = C4IW_WR_TO; unsigned to = C4IW_WR_TO;
do { int ret;
wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); do {
if (!wr_waitp->done) { ret = wait_event_timeout(wr_waitp->wait,
test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
if (!ret) {
printk(KERN_ERR MOD "%s - Device %s not responding - " printk(KERN_ERR MOD "%s - Device %s not responding - "
"tid %u qpid %u\n", func, "tid %u qpid %u\n", func,
pci_name(rdev->lldi.pdev), hwtid, qpid); pci_name(rdev->lldi.pdev), hwtid, qpid);
if (c4iw_fatal_error(rdev)) {
wr_waitp->ret = -EIO;
break;
}
to = to << 2; to = to << 2;
} }
} while (!wr_waitp->done); } while (!ret);
if (wr_waitp->ret) if (wr_waitp->ret)
printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", PDBG("%s: FW reply %d tid %u qpid %u\n",
pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
return wr_waitp->ret; return wr_waitp->ret;
} }
struct c4iw_dev { struct c4iw_dev {
struct ib_device ibdev; struct ib_device ibdev;
struct c4iw_rdev rdev; struct c4iw_rdev rdev;
...@@ -175,9 +191,7 @@ struct c4iw_dev { ...@@ -175,9 +191,7 @@ struct c4iw_dev {
struct idr qpidr; struct idr qpidr;
struct idr mmidr; struct idr mmidr;
spinlock_t lock; spinlock_t lock;
struct list_head entry;
struct dentry *debugfs_root; struct dentry *debugfs_root;
u8 registered;
}; };
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
......
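For orientation, a sketch (not taken from the driver) of how the helpers introduced above pair up: the submitting thread arms the wait object, posts its firmware work request carrying a pointer to it, and blocks in c4iw_wait_for_reply(); the reply handler, such as fw6_msg() or pass_open_rpl(), calls c4iw_wake_up() with the translated status. The wait condition is now consumed with test_and_clear_bit() on the REPLY_READY bit rather than the old done flag.

/* Sketch only; post_and_wait() is hypothetical and assumes the helpers
 * shown in the hunk above. */
static int post_and_wait(struct c4iw_rdev *rdev, u32 hwtid, u32 qpid)
{
	struct c4iw_wr_wait wr_wait;

	c4iw_init_wr_wait(&wr_wait);	/* clear ret and the REPLY_READY bit */

	/* ... build the firmware WR, embed a pointer to wr_wait in it
	 * (fw6_msg() recovers it from rpl->data[1]), then post it ... */

	return c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
}

/* Reply side, as in fw6_msg(): c4iw_wake_up(wr_waitp, ret ? -ret : 0); */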
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 		if (ret)
 			goto bail2;
 	}
-	dev->registered = 1;
 	return 0;
 bail2:
 	ib_unregister_device(&dev->ibdev);
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 				   c4iw_class_attributes[i]);
 	ib_unregister_device(&dev->ibdev);
 	kfree(dev->ibdev.iwcm);
-	dev->registered = 0;
 	return;
 }
@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
 		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
 		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
-		t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 |
+		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
 		V_FW_RI_RES_WR_IQID(scq->cqid));
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			if (ret) {
 				if (internal)
 					c4iw_get_ep(&qhp->ep->com);
-				disconnect = abort = 1;
 				goto err;
 			}
 			break;
......
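The parenthesization fix in create_qp() above is a real bug fix: in C, | binds more tightly than ?:, so the old expression parsed as cond ? F_FW_RI_RES_WR_ONCHIP : (0 | V_FW_RI_RES_WR_IQID(scq->cqid)) and dropped the IQID field whenever the SQ was on-chip. A standalone illustration; ONCHIP and IQID below are made-up stand-ins, not the firmware definitions.

#include <stdio.h>

#define ONCHIP  0x1
#define IQID(x) ((x) << 4)

int main(void)
{
	int onchip = 1;
	unsigned int qid = 7;

	/* Without parentheses, '|' binds tighter than '?:', so IQID(qid)
	 * only lands in the else branch and is lost when onchip is true. */
	unsigned int buggy = onchip ? ONCHIP : 0 | IQID(qid);

	/* The fix: parenthesize the conditional so IQID(qid) is always OR'd in. */
	unsigned int fixed = (onchip ? ONCHIP : 0) | IQID(qid);

	printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);	/* buggy=0x1 fixed=0x71 */
	return 0;
}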
@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	struct ipath_devdata *dd;
 	unsigned long long addr;
 	u32 bar0 = 0, bar1 = 0;
-	u8 rev;
 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 		goto bail_regions;
 	}
-	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-	if (ret) {
-		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
-			      "%u: err %d\n", dd->ipath_unit, -ret);
-		goto bail_regions;	/* shouldn't ever happen */
-	}
-	dd->ipath_pcirev = rev;
+	dd->ipath_pcirev = pdev->revision;
 #if defined(__powerpc__)
 	/* There isn't a generic way to specify writethrough mappings */
......
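The ipath_init_one() hunk replaces a manual config-space read of the revision ID with the value the PCI core already caches in struct pci_dev, which also removes an error path that never needed to exist. A minimal sketch of the two patterns; the helper functions are illustrative, not driver code.

#include <linux/pci.h>

/* Old pattern: issue a config-space read and carry an error path. */
static int read_rev_config(struct pci_dev *pdev, u8 *rev)
{
	return pci_read_config_byte(pdev, PCI_REVISION_ID, rev);
}

/* New pattern: the PCI core caches the revision at enumeration time,
 * so drivers can simply read pdev->revision with no failure case. */
static u8 read_rev_cached(struct pci_dev *pdev)
{
	return pdev->revision;
}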
@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
 	tstart = get_jiffies_64();
 	while (chan_done &&
-	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+	       !time_after64(get_jiffies_64(),
+			     tstart + msecs_to_jiffies(500))) {
 		msleep(20);
 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
......
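The serdes_7322_init_new() hunk fixes a timeout that could never fire: time_after64(tstart, tstart + msecs_to_jiffies(500)) compares the start stamp against itself plus the budget and is always false, so the loop was bounded only by chan_done. Re-sampling the clock each pass gives a real 500 ms cap. A generic sketch of the corrected polling shape; poll_done() and its callback are hypothetical.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll until done() reports completion or timeout_ms elapses; the clock
 * must be re-read on every pass for time_after64() to ever trip. */
static int poll_done(bool (*done)(void *arg), void *arg, unsigned int timeout_ms)
{
	u64 tstart = get_jiffies_64();

	while (!done(arg)) {
		if (time_after64(get_jiffies_64(),
				 tstart + msecs_to_jiffies(timeout_ms)))
			return -ETIMEDOUT;
		msleep(20);
	}
	return 0;
}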
@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
 	 */
 	devid = parent->device;
 	if (devid >= 0x25e2 && devid <= 0x25fa) {
-		u8 rev;
 		/* 5000 P/V/X/Z */
-		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
-		if (rev <= 0xb2)
+		if (parent->revision <= 0xb2)
 			bits = 1U << 10;
 		else
 			bits = 7U << 10;
......