Commit 1c79a5a8 authored by David S. Miller

Merge branch 'cxgb4-next'

Hariprasad Shenai says:

====================
Doorbell drop avoidance bug fixes for iw_cxgb4

This patch series provides fixes for Chelsio T4/T5 adapters related to
DB drop avoidance, along with a small keepalive-related fix in iw_cxgb4.

The patch series is created against David Miller's 'net-next' tree and
includes patches for the cxgb4 and iw_cxgb4 drivers.

We would like this patch series to be merged via David Miller's
'net-next' tree.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if there are any review comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 57a7744e 05eb2389
@@ -1647,6 +1647,15 @@ static inline int act_open_has_tid(int status)
               status != CPL_ERR_ARP_MISS;
}

+/* Returns whether a CPL status conveys negative advice.
+ */
+static int is_neg_adv(unsigned int status)
+{
+        return status == CPL_ERR_RTX_NEG_ADVICE ||
+               status == CPL_ERR_PERSIST_NEG_ADVICE ||
+               status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
#define ACT_OPEN_RETRY_COUNT 2

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,

@@ -1835,7 +1844,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));
-       if (status == CPL_ERR_RTX_NEG_ADVICE) {
+       if (is_neg_adv(status)) {
                printk(KERN_WARNING MOD "Connection problems for atid %u\n",
                       atid);
                return 0;

@@ -2265,15 +2274,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
}

-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
-        return status == CPL_ERR_RTX_NEG_ADVICE ||
-               status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);

@@ -2287,7 +2287,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
-       if (is_neg_adv_abort(req->status)) {
+       if (is_neg_adv(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                return 0;

@@ -3570,7 +3570,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
                kfree_skb(skb);
                return 0;
        }
-       if (is_neg_adv_abort(req->status)) {
+       if (is_neg_adv(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                kfree_skb(skb);
......
This diff is collapsed.
@@ -109,6 +109,7 @@ struct c4iw_dev_ucontext {
enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
+       T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {

@@ -130,6 +131,7 @@ struct c4iw_stats {
        u64 db_empty;
        u64 db_drop;
        u64 db_state_transitions;
+       u64 db_fc_interruptions;
        u64 tcam_full;
        u64 act_ofld_conn_fails;
        u64 pas_ofld_conn_fails;

@@ -150,6 +152,7 @@ struct c4iw_rdev {
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
+       struct t4_dev_status_page *status_page;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)

@@ -211,7 +214,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
-       RECOVERY = 2
+       RECOVERY = 2,
+       STOPPED = 3
};

struct c4iw_dev {

@@ -225,10 +229,10 @@ struct c4iw_dev {
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
-       int qpcnt;
        struct idr hwtid_idr;
        struct idr atid_idr;
        struct idr stid_idr;
+       struct list_head db_fc_list;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)

@@ -432,6 +436,7 @@ struct c4iw_qp_attributes {
struct c4iw_qp {
        struct ib_qp ibqp;
+       struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
......
@@ -106,15 +106,54 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+       static int warned;
+       struct c4iw_alloc_ucontext_resp uresp;
+       int ret = 0;
+       struct c4iw_mm_entry *mm = NULL;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
-       if (!context)
-               return ERR_PTR(-ENOMEM);
+       if (!context) {
+               ret = -ENOMEM;
+               goto err;
+       }
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
+       if (udata->outlen < sizeof(uresp)) {
+               if (!warned++)
+                       pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+               rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
+       } else {
+               mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+               if (!mm)
+                       goto err_free;
+               uresp.status_page_size = PAGE_SIZE;
+               spin_lock(&context->mmap_lock);
+               uresp.status_page_key = context->key;
+               context->key += PAGE_SIZE;
+               spin_unlock(&context->mmap_lock);
+               ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+               if (ret)
+                       goto err_mm;
+               mm->key = uresp.status_page_key;
+               mm->addr = virt_to_phys(rhp->rdev.status_page);
+               mm->len = PAGE_SIZE;
+               insert_mmap(context, mm);
+       }
        return &context->ibucontext;
+err_mm:
+       kfree(mm);
+err_free:
+       kfree(context);
+err:
+       return ERR_PTR(ret);
}

static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
......
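The ucontext change above is the kernel half of a small user/kernel contract: a new-enough libcxgb4 passes a response buffer large enough for c4iw_alloc_ucontext_resp, receives status_page_key and status_page_size, and mmap()s that key to reach the per-device status page, while an old library gets the T4_STATUS_PAGE_DISABLED behaviour instead. A minimal user-side sketch of how such a library might consume the response is shown below; the struct mirrors t4_dev_status_page from the t4.h hunk further down, and the function and variable names are hypothetical, not part of libcxgb4.

#include <stdint.h>
#include <sys/mman.h>

/* Mirrors the kernel's t4_dev_status_page (see the t4.h hunk below). */
struct status_page {
        uint8_t db_off;         /* non-zero while user doorbells are disabled */
};

static volatile struct status_page *status_page;

/* cmd_fd is the verbs device command fd; key/size come from the
 * c4iw_alloc_ucontext_resp filled in by the kernel code above. */
static int map_status_page(int cmd_fd, uint64_t key, uint32_t size)
{
        void *p = mmap(NULL, size, PROT_READ, MAP_SHARED, cmd_fd, key);

        if (p == MAP_FAILED)
                return -1;
        status_page = p;
        return 0;
}

/* Checked before each user doorbell ring; when db_off is set the
 * library would fall back to a kernel-mediated doorbell instead. */
static int user_db_allowed(void)
{
        return status_page && !status_page->db_off;
}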
@@ -638,6 +638,46 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
        wake_up(&(to_c4iw_qp(qp)->wait));
}

+static void add_to_fc_list(struct list_head *head, struct list_head *entry)
+{
+       if (list_empty(entry))
+               list_add_tail(entry, head);
+}
+
+static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&qhp->rhp->lock, flags);
+       spin_lock(&qhp->lock);
+       if (qhp->rhp->db_state == NORMAL) {
+               t4_ring_sq_db(&qhp->wq, inc);
+       } else {
+               add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+               qhp->wq.sq.wq_pidx_inc += inc;
+       }
+       spin_unlock(&qhp->lock);
+       spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+       return 0;
+}
+
+static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&qhp->rhp->lock, flags);
+       spin_lock(&qhp->lock);
+       if (qhp->rhp->db_state == NORMAL) {
+               t4_ring_rq_db(&qhp->wq, inc);
+       } else {
+               add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+               qhp->wq.rq.wq_pidx_inc += inc;
+       }
+       spin_unlock(&qhp->lock);
+       spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+       return 0;
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
{

@@ -750,9 +790,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                t4_sq_produce(&qhp->wq, len16);
                idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        }
-       if (t4_wq_db_enabled(&qhp->wq))
+       if (!qhp->rhp->rdev.status_page->db_off) {
                t4_ring_sq_db(&qhp->wq, idx);
-       spin_unlock_irqrestore(&qhp->lock, flag);
+               spin_unlock_irqrestore(&qhp->lock, flag);
+       } else {
+               spin_unlock_irqrestore(&qhp->lock, flag);
+               ring_kernel_sq_db(qhp, idx);
+       }
        return err;
}

@@ -812,9 +856,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                wr = wr->next;
                num_wrs--;
        }
-       if (t4_wq_db_enabled(&qhp->wq))
+       if (!qhp->rhp->rdev.status_page->db_off) {
                t4_ring_rq_db(&qhp->wq, idx);
-       spin_unlock_irqrestore(&qhp->lock, flag);
+               spin_unlock_irqrestore(&qhp->lock, flag);
+       } else {
+               spin_unlock_irqrestore(&qhp->lock, flag);
+               ring_kernel_rq_db(qhp, idx);
+       }
        return err;
}

@@ -1200,35 +1248,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
        return ret;
}

-/*
- * Called by the library when the qp has user dbs disabled due to
- * a DB_FULL condition.  This function will single-thread all user
- * DB rings to avoid overflowing the hw db-fifo.
- */
-static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
-{
-       int delay = db_delay_usecs;
-
-       mutex_lock(&qhp->rhp->db_mutex);
-       do {
-               /*
-                * The interrupt threshold is dbfifo_int_thresh << 6. So
-                * make sure we don't cross that and generate an interrupt.
-                */
-               if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
-                   (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
-                       writel(QID(qid) | PIDX(inc), qhp->wq.db);
-                       break;
-               }
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(usecs_to_jiffies(delay));
-               delay = min(delay << 1, 2000);
-       } while (1);
-       mutex_unlock(&qhp->rhp->db_mutex);
-       return 0;
-}
-
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,

@@ -1278,11 +1297,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        }

        if (mask & C4IW_QP_ATTR_SQ_DB) {
-               ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+               ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
                goto out;
        }
        if (mask & C4IW_QP_ATTR_RQ_DB) {
-               ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+               ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
                goto out;
        }

@@ -1465,14 +1484,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        return ret;
}

-static int enable_qp_db(int id, void *p, void *data)
-{
-       struct c4iw_qp *qp = p;
-
-       t4_enable_wq_db(&qp->wq);
-       return 0;
-}
-
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
        struct c4iw_dev *rhp;

@@ -1490,22 +1501,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);

-       spin_lock_irq(&rhp->lock);
-       remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-       rhp->qpcnt--;
-       BUG_ON(rhp->qpcnt < 0);
-       if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
-               rhp->rdev.stats.db_state_transitions++;
-               rhp->db_state = NORMAL;
-               idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
-       }
-       if (db_coalescing_threshold >= 0)
-               if (rhp->qpcnt <= db_coalescing_threshold)
-                       cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
-       spin_unlock_irq(&rhp->lock);
+       remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

+       spin_lock_irq(&rhp->lock);
+       if (!list_empty(&qhp->db_fc_entry))
+               list_del_init(&qhp->db_fc_entry);
+       spin_unlock_irq(&rhp->lock);
+
        ucontext = ib_qp->uobject ?
                   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
        destroy_qp(&rhp->rdev, &qhp->wq,

@@ -1516,14 +1520,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        return 0;
}

-static int disable_qp_db(int id, void *p, void *data)
-{
-       struct c4iw_qp *qp = p;
-
-       t4_disable_wq_db(&qp->wq);
-       return 0;
-}
-
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata)
{

@@ -1610,20 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);

-       spin_lock_irq(&rhp->lock);
-       if (rhp->db_state != NORMAL)
-               t4_disable_wq_db(&qhp->wq);
-       rhp->qpcnt++;
-       if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
-               rhp->rdev.stats.db_state_transitions++;
-               rhp->db_state = FLOW_CONTROL;
-               idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
-       }
-       if (db_coalescing_threshold >= 0)
-               if (rhp->qpcnt > db_coalescing_threshold)
-                       cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
-       ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
-       spin_unlock_irq(&rhp->lock);
+       ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
                goto err2;

@@ -1709,6 +1692,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
+       INIT_LIST_HEAD(&qhp->db_fc_entry);
        PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
             __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.sq.qid);
......
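The qp.c changes above replace the old per-QP doorbell disable/enable scheme with accumulation: when db_state is not NORMAL, ring_kernel_sq_db()/ring_kernel_rq_db() queue the QP on db_fc_list and bank the doorbell increment in wq_pidx_inc instead of writing it. The code that later walks db_fc_list and replays the banked increments lives in the collapsed portion of this series; a minimal sketch of that replay step, using only the fields added above and with illustrative helper names, might look like:

/* Illustrative sketch only: flush one QP's banked doorbell increments once
 * the hardware DB FIFO has drained.  Locking mirrors ring_kernel_sq_db(). */
static void resume_one_qp(struct c4iw_qp *qhp)
{
        spin_lock(&qhp->lock);
        if (qhp->wq.sq.wq_pidx_inc) {
                t4_ring_sq_db(&qhp->wq, qhp->wq.sq.wq_pidx_inc);
                qhp->wq.sq.wq_pidx_inc = 0;
        }
        if (qhp->wq.rq.wq_pidx_inc) {
                t4_ring_rq_db(&qhp->wq, qhp->wq.rq.wq_pidx_inc);
                qhp->wq.rq.wq_pidx_inc = 0;
        }
        spin_unlock(&qhp->lock);
}

/* Illustrative sketch only: drain db_fc_list and return to NORMAL. */
static void resume_queues_sketch(struct c4iw_dev *rhp)
{
        spin_lock_irq(&rhp->lock);
        while (!list_empty(&rhp->db_fc_list)) {
                struct c4iw_qp *qhp = list_first_entry(&rhp->db_fc_list,
                                                       struct c4iw_qp,
                                                       db_fc_entry);

                list_del_init(&qhp->db_fc_entry);
                resume_one_qp(qhp);
        }
        rhp->db_state = NORMAL;
        rhp->rdev.stats.db_state_transitions++;
        spin_unlock_irq(&rhp->lock);
}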
@@ -300,6 +300,7 @@ struct t4_sq {
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
+       u16 wq_pidx_inc;
        u16 flags;
        short flush_cidx;
};

@@ -324,6 +325,7 @@ struct t4_rq {
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
+       u16 wq_pidx_inc;
};

struct t4_wq {

@@ -609,3 +611,7 @@ static inline void t4_set_cq_in_error(struct t4_cq *cq)
        ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
#endif
+
+struct t4_dev_status_page {
+       u8 db_off;
+};
......
@@ -70,4 +70,9 @@ struct c4iw_create_qp_resp {
        __u32 qid_mask;
        __u32 flags;
};
+
+struct c4iw_alloc_ucontext_resp {
+       __u64 status_page_key;
+       __u32 status_page_size;
+};
#endif
@@ -500,6 +500,7 @@ struct sge_txq {
        spinlock_t db_lock;
        int db_disabled;
        unsigned short db_pidx;
+       unsigned short db_pidx_inc;
        u64 udb;
};
......
@@ -3578,14 +3578,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
static void disable_txq_db(struct sge_txq *q)
{
-       spin_lock_irq(&q->db_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->db_lock, flags);
        q->db_disabled = 1;
-       spin_unlock_irq(&q->db_lock);
+       spin_unlock_irqrestore(&q->db_lock, flags);
}

-static void enable_txq_db(struct sge_txq *q)
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
        spin_lock_irq(&q->db_lock);
+       if (q->db_pidx_inc) {
+               /* Make sure that all writes to the TX descriptors
+                * are committed before we tell HW about them.
+                */
+               wmb();
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+               q->db_pidx_inc = 0;
+       }
        q->db_disabled = 0;
        spin_unlock_irq(&q->db_lock);
}

@@ -3607,11 +3618,32 @@ static void enable_dbs(struct adapter *adap)
        int i;

        for_each_ethrxq(&adap->sge, i)
-               enable_txq_db(&adap->sge.ethtxq[i].q);
+               enable_txq_db(adap, &adap->sge.ethtxq[i].q);
        for_each_ofldrxq(&adap->sge, i)
-               enable_txq_db(&adap->sge.ofldtxq[i].q);
+               enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
        for_each_port(adap, i)
-               enable_txq_db(&adap->sge.ctrlq[i].q);
+               enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+       if (adap->uld_handle[CXGB4_ULD_RDMA])
+               ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+                               cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+       struct adapter *adap;
+
+       adap = container_of(work, struct adapter, db_full_task);
+
+       drain_db_fifo(adap, dbfifo_drain_delay);
+       enable_dbs(adap);
+       notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)

@@ -3619,7 +3651,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
        u16 hw_pidx, hw_cidx;
        int ret;

-       spin_lock_bh(&q->db_lock);
+       spin_lock_irq(&q->db_lock);
        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;

@@ -3636,7 +3668,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
        }
out:
        q->db_disabled = 0;
-       spin_unlock_bh(&q->db_lock);
+       q->db_pidx_inc = 0;
+       spin_unlock_irq(&q->db_lock);
        if (ret)
                CH_WARN(adap, "DB drop recovery failed.\n");
}

@@ -3652,29 +3685,6 @@ static void recover_all_queues(struct adapter *adap)
                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

-static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
-{
-       mutex_lock(&uld_mutex);
-       if (adap->uld_handle[CXGB4_ULD_RDMA])
-               ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
-                               cmd);
-       mutex_unlock(&uld_mutex);
-}
-
-static void process_db_full(struct work_struct *work)
-{
-       struct adapter *adap;
-
-       adap = container_of(work, struct adapter, db_full_task);
-
-       notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-       drain_db_fifo(adap, dbfifo_drain_delay);
-       t4_set_reg_field(adap, SGE_INT_ENABLE3,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT);
-       notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-}
-
static void process_db_drop(struct work_struct *work)
{
        struct adapter *adap;

@@ -3682,11 +3692,13 @@ static void process_db_drop(struct work_struct *work)
        adap = container_of(work, struct adapter, db_drop_task);

        if (is_t4(adap->params.chip)) {
-               disable_dbs(adap);
+               drain_db_fifo(adap, dbfifo_drain_delay);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
-               drain_db_fifo(adap, 1);
+               drain_db_fifo(adap, dbfifo_drain_delay);
                recover_all_queues(adap);
+               drain_db_fifo(adap, dbfifo_drain_delay);
                enable_dbs(adap);
+               notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        } else {
                u32 dropped_db = t4_read_reg(adap, 0x010ac);
                u16 qid = (dropped_db >> 15) & 0x1ffff;

@@ -3727,6 +3739,8 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
        if (is_t4(adap->params.chip)) {
+               disable_dbs(adap);
+               notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
                t4_set_reg_field(adap, SGE_INT_ENABLE3,
                                 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
                queue_work(workq, &adap->db_full_task);

@@ -3735,8 +3749,11 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
-       if (is_t4(adap->params.chip))
-               queue_work(workq, &adap->db_drop_task);
+       if (is_t4(adap->params.chip)) {
+               disable_dbs(adap);
+               notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+       }
+       queue_work(workq, &adap->db_drop_task);
}

static void uld_attach(struct adapter *adap, unsigned int uld)
......
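With the cxgb4 changes above, the T4 interrupt paths (t4_db_full()/t4_db_dropped()) disable doorbells and notify the RDMA ULD immediately, so the deferred work only has to drain the FIFO, replay the banked doorbells and re-enable the DB-full interrupts. While a TX queue is disabled, ring_tx_db() in the following hunk banks the PIDX increment in db_pidx_inc, and enable_txq_db() replays the accumulated count with a single SGE_PF_KDOORBELL write. A standalone toy model of that bank-and-replay pattern is sketched below; write_doorbell() merely stands in for the real register write and nothing here is driver API.

/* Toy model of the deferred-doorbell pattern: while the queue is disabled,
 * producer increments accumulate in db_pidx_inc; re-enabling the queue
 * replays them as one doorbell write. */
#include <stdio.h>

struct txq {
        int db_disabled;
        unsigned short db_pidx_inc;
};

static void write_doorbell(unsigned short inc)
{
        printf("doorbell: PIDX += %u\n", inc);
}

static void ring_tx_db(struct txq *q, unsigned short n)
{
        if (!q->db_disabled)
                write_doorbell(n);
        else
                q->db_pidx_inc += n;    /* defer until the FIFO drains */
}

static void enable_txq_db(struct txq *q)
{
        if (q->db_pidx_inc) {
                write_doorbell(q->db_pidx_inc); /* replay as one write */
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
}

int main(void)
{
        struct txq q = { .db_disabled = 1 };

        ring_tx_db(&q, 2);
        ring_tx_db(&q, 3);
        ring_tx_db(&q, 1);
        enable_txq_db(&q);      /* prints: doorbell: PIDX += 6 */
        return 0;
}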
@@ -860,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
        unsigned int *wr, index;
+       unsigned long flags;

        wmb();            /* write descriptors before telling HW */
-       spin_lock(&q->db_lock);
+       spin_lock_irqsave(&q->db_lock, flags);
        if (!q->db_disabled) {
                if (is_t4(adap->params.chip)) {
                        t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),

@@ -878,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                        writel(n, adap->bar2 + q->udb + 8);
                        wmb();
                }
-       }
+       } else
+               q->db_pidx_inc += n;
        q->db_pidx = q->pidx;
-       spin_unlock(&q->db_lock);
+       spin_unlock_irqrestore(&q->db_lock, flags);
}

/**
......
@@ -116,6 +116,7 @@ enum CPL_error {
        CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
        CPL_ERR_RTX_NEG_ADVICE = 35,
        CPL_ERR_PERSIST_NEG_ADVICE = 36,
+       CPL_ERR_KEEPALV_NEG_ADVICE = 37,
        CPL_ERR_ABORT_FAILED = 42,
        CPL_ERR_IWARP_FLM = 50,
};
......