Commit 216fe702 authored by Roland Dreier's avatar Roland Dreier

Merge branch 'cxgb3' into for-next

parents e8094e66 68baf495
...@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, ...@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
udelay(1); udelay(1);
if (i++ > 1000000) { if (i++ > 1000000) {
BUG_ON(1);
printk(KERN_ERR "%s: stalled rnic\n", printk(KERN_ERR "%s: stalled rnic\n",
rdev_p->dev_name); rdev_p->dev_name);
return -EIO; return -EIO;
...@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) ...@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
} }
int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{ {
struct rdma_cq_setup setup; struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
...@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) ...@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid) if (!cq->cqid)
return -ENOMEM; return -ENOMEM;
cq->sw_queue = kzalloc(size, GFP_KERNEL); if (kernel) {
if (!cq->sw_queue) cq->sw_queue = kzalloc(size, GFP_KERNEL);
return -ENOMEM; if (!cq->sw_queue)
cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), return -ENOMEM;
(1UL << (cq->size_log2)) * }
sizeof(struct t3_cqe), cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
&(cq->dma_addr), GFP_KERNEL); &(cq->dma_addr), GFP_KERNEL);
if (!cq->queue) { if (!cq->queue) {
kfree(cq->sw_queue); kfree(cq->sw_queue);
......
...@@ -53,7 +53,7 @@ ...@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256 #define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024 #define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
#define T3_MAX_CQ_DEPTH 8192 #define T3_MAX_CQ_DEPTH 262144
#define T3_MAX_NUM_STAG (1<<15) #define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL #define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
...@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev); ...@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
void cxio_rdev_close(struct cxio_rdev *rdev); void cxio_rdev_close(struct cxio_rdev *rdev);
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit); enum t3_cq_opcode op, u32 credit);
int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq); int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx); void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
......
...@@ -730,7 +730,22 @@ struct t3_cq { ...@@ -730,7 +730,22 @@ struct t3_cq {
static inline void cxio_set_wq_in_error(struct t3_wq *wq) static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{ {
wq->queue->wq_in_err.err = 1; wq->queue->wq_in_err.err |= 1;
}
/*
 * Suppress doorbell rings for this WQ: bit 1 of wq_in_err.err is the
 * "doorbells disabled" flag (bit 0 is the error flag set by
 * cxio_set_wq_in_error()).  Posting paths test it via
 * cxio_wq_db_enabled() before ringing.
 */
static inline void cxio_disable_wq_db(struct t3_wq *wq)
{
	wq->queue->wq_in_err.err |= 2;
}
/*
 * Re-allow doorbell rings: clear the doorbell-disable flag (bit 1)
 * while leaving the error flag (bit 0) untouched.
 */
static inline void cxio_enable_wq_db(struct t3_wq *wq)
{
	wq->queue->wq_in_err.err &= ~2;
}
static inline int cxio_wq_db_enabled(struct t3_wq *wq)
{
return !(wq->queue->wq_in_err.err & 2);
} }
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
......
...@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = { ...@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
static LIST_HEAD(dev_list); static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex); static DEFINE_MUTEX(dev_mutex);
/*
 * idr_for_each() callback: disable doorbell rings on one QP's work queue.
 * @id:   idr index (unused)
 * @p:    the struct iwch_qp stored in the idr
 * @data: unused
 *
 * Always returns 0 so idr_for_each() visits every QP.
 */
static int disable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;

	cxio_disable_wq_db(&qhp->wq);
	return 0;
}
/*
 * idr_for_each() callback: re-enable doorbell rings on one QP's work queue.
 * @id:   idr index (unused)
 * @p:    the struct iwch_qp stored in the idr
 * @data: non-NULL means ring the QP's doorbell once before re-enabling
 *        (used on the DB-empty/DB-drop recovery paths, where rings may
 *        have been lost while doorbells were off).
 *
 * Always returns 0 so idr_for_each() visits every QP.
 */
static int enable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;

	if (data)
		ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
	cxio_enable_wq_db(&qhp->wq);
	return 0;
}
/*
 * Disable doorbells on every QP of the device.  rnicp->lock protects
 * the qpidr against concurrent insert/remove while we iterate.
 */
static void disable_dbs(struct iwch_dev *rnicp)
{
	spin_lock_irq(&rnicp->lock);
	idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&rnicp->lock);
}
/*
 * Re-enable doorbells on every QP of the device.
 * @ring_db: non-zero to also ring each QP's doorbell once (passed to
 *           enable_qp_db() through the opaque data pointer).
 *
 * rnicp->lock protects the qpidr during iteration.
 */
static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
{
	spin_lock_irq(&rnicp->lock);
	idr_for_each(&rnicp->qpidr, enable_qp_db,
		     (void *)(unsigned long)ring_db);
	spin_unlock_irq(&rnicp->lock);
}
/*
 * Delayed-work handler scheduled from the OFFLOAD_DB_DROP event: after
 * the randomized back-off delay, re-enable all QP doorbells and ring
 * each one once (ring_db = 1) to recover rings lost in the drop.
 */
static void iwch_db_drop_task(struct work_struct *work)
{
	struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
					      db_drop_task.work);

	enable_dbs(rnicp, 1);
}
static void rnic_init(struct iwch_dev *rnicp) static void rnic_init(struct iwch_dev *rnicp)
{ {
PDBG("%s iwch_dev %p\n", __func__, rnicp); PDBG("%s iwch_dev %p\n", __func__, rnicp);
...@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp) ...@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
idr_init(&rnicp->qpidr); idr_init(&rnicp->qpidr);
idr_init(&rnicp->mmidr); idr_init(&rnicp->mmidr);
spin_lock_init(&rnicp->lock); spin_lock_init(&rnicp->lock);
INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
...@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev) ...@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
mutex_lock(&dev_mutex); mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) { list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
if (dev->rdev.t3cdev_p == tdev) { if (dev->rdev.t3cdev_p == tdev) {
dev->rdev.flags = CXIO_ERROR_FATAL;
cancel_delayed_work_sync(&dev->db_drop_task);
list_del(&dev->entry); list_del(&dev->entry);
iwch_unregister_device(dev); iwch_unregister_device(dev);
cxio_rdev_close(&dev->rdev); cxio_rdev_close(&dev->rdev);
...@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) ...@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
struct cxio_rdev *rdev = tdev->ulp; struct cxio_rdev *rdev = tdev->ulp;
struct iwch_dev *rnicp; struct iwch_dev *rnicp;
struct ib_event event; struct ib_event event;
u32 portnum = port_id + 1; u32 portnum = port_id + 1;
int dispatch = 0;
if (!rdev) if (!rdev)
return; return;
...@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) ...@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
case OFFLOAD_STATUS_DOWN: { case OFFLOAD_STATUS_DOWN: {
rdev->flags = CXIO_ERROR_FATAL; rdev->flags = CXIO_ERROR_FATAL;
event.event = IB_EVENT_DEVICE_FATAL; event.event = IB_EVENT_DEVICE_FATAL;
dispatch = 1;
break; break;
} }
case OFFLOAD_PORT_DOWN: { case OFFLOAD_PORT_DOWN: {
event.event = IB_EVENT_PORT_ERR; event.event = IB_EVENT_PORT_ERR;
dispatch = 1;
break; break;
} }
case OFFLOAD_PORT_UP: { case OFFLOAD_PORT_UP: {
event.event = IB_EVENT_PORT_ACTIVE; event.event = IB_EVENT_PORT_ACTIVE;
dispatch = 1;
break;
}
case OFFLOAD_DB_FULL: {
disable_dbs(rnicp);
break;
}
case OFFLOAD_DB_EMPTY: {
enable_dbs(rnicp, 1);
break;
}
case OFFLOAD_DB_DROP: {
unsigned long delay = 1000;
unsigned short r;
disable_dbs(rnicp);
get_random_bytes(&r, 2);
delay += r & 1023;
/*
* delay is between 1000-2023 usecs.
*/
schedule_delayed_work(&rnicp->db_drop_task,
usecs_to_jiffies(delay));
break; break;
} }
} }
event.device = &rnicp->ibdev; if (dispatch) {
event.element.port_num = portnum; event.device = &rnicp->ibdev;
ib_dispatch_event(&event); event.element.port_num = portnum;
ib_dispatch_event(&event);
}
return; return;
} }
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
...@@ -110,6 +111,7 @@ struct iwch_dev { ...@@ -110,6 +111,7 @@ struct iwch_dev {
struct idr mmidr; struct idr mmidr;
spinlock_t lock; spinlock_t lock;
struct list_head entry; struct list_head entry;
struct delayed_work db_drop_task;
}; };
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
......
...@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve ...@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
entries = roundup_pow_of_two(entries); entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries); chp->cq.size_log2 = ilog2(entries);
if (cxio_create_cq(&rhp->rdev, &chp->cq)) { if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
kfree(chp); kfree(chp);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
......
...@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
++(qhp->wq.sq_wptr); ++(qhp->wq.sq_wptr);
} }
spin_unlock_irqrestore(&qhp->lock, flag); spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); if (cxio_wq_db_enabled(&qhp->wq))
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out: out:
if (err) if (err)
...@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
num_wrs--; num_wrs--;
} }
spin_unlock_irqrestore(&qhp->lock, flag); spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); if (cxio_wq_db_enabled(&qhp->wq))
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out: out:
if (err) if (err)
...@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp, ...@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
++(qhp->wq.sq_wptr); ++(qhp->wq.sq_wptr);
spin_unlock_irqrestore(&qhp->lock, flag); spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); if (cxio_wq_db_enabled(&qhp->wq))
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
return err; return err;
} }
......
...@@ -264,6 +264,10 @@ struct adapter { ...@@ -264,6 +264,10 @@ struct adapter {
struct work_struct fatal_error_handler_task; struct work_struct fatal_error_handler_task;
struct work_struct link_fault_handler_task; struct work_struct link_fault_handler_task;
struct work_struct db_full_task;
struct work_struct db_empty_task;
struct work_struct db_drop_task;
struct dentry *debugfs_root; struct dentry *debugfs_root;
struct mutex mdio_lock; struct mutex mdio_lock;
...@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, ...@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data); unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie); irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
extern struct workqueue_struct *cxgb3_wq;
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "common.h" #include "common.h"
...@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not"); ...@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this. * for our work to complete. Get our own work queue to solve this.
*/ */
static struct workqueue_struct *cxgb3_wq; struct workqueue_struct *cxgb3_wq;
/** /**
* link_report - show link status and link speed/duplex * link_report - show link status and link speed/duplex
...@@ -590,6 +591,19 @@ static void setup_rss(struct adapter *adap) ...@@ -590,6 +591,19 @@ static void setup_rss(struct adapter *adap)
V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
} }
/*
 * Ring the egress-context doorbell for every TX queue of every active
 * qset (qs->adap non-NULL presumably marks an initialized qset — the
 * full init path is not visible here).  Called from db_drop_task()
 * after the SGE reported a dropped doorbell.
 */
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}
static void init_napi(struct adapter *adap) static void init_napi(struct adapter *adap)
{ {
int i; int i;
...@@ -2754,6 +2768,42 @@ static void t3_adap_check_task(struct work_struct *work) ...@@ -2754,6 +2768,42 @@ static void t3_adap_check_task(struct work_struct *work)
spin_unlock_irq(&adapter->work_lock); spin_unlock_irq(&adapter->work_lock);
} }
/*
 * Work handler queued by t3_sge_err_intr_handler() on the hi/lo
 * priority doorbell-FULL interrupt status: notify offload ULPs
 * (e.g. iw_cxgb3) via the OFFLOAD_DB_FULL event.
 */
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}
/*
 * Work handler queued by t3_sge_err_intr_handler() on the hi/lo
 * priority doorbell-EMPTY interrupt status: notify offload ULPs via
 * the OFFLOAD_DB_EMPTY event so they may re-enable their doorbells.
 */
static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}
/*
 * Work handler for the SGE doorbell-DROP interrupt: first tell the
 * offload ULPs (OFFLOAD_DB_DROP), then after a randomized delay
 * re-ring this driver's own qset doorbells via ring_dbs().  Runs in
 * process context (cxgb3_wq workqueue), so sleeping here is fine.
 */
static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;

	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}
/* /*
* Processes external (PHY) interrupts in process context. * Processes external (PHY) interrupts in process context.
*/ */
...@@ -3222,6 +3272,11 @@ static int __devinit init_one(struct pci_dev *pdev, ...@@ -3222,6 +3272,11 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&adapter->adapter_list); INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
INIT_WORK(&adapter->db_full_task, db_full_task);
INIT_WORK(&adapter->db_empty_task, db_empty_task);
INIT_WORK(&adapter->db_drop_task, db_drop_task);
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports0 + ai->nports1; ++i) { for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
......
...@@ -73,7 +73,10 @@ enum { ...@@ -73,7 +73,10 @@ enum {
OFFLOAD_STATUS_UP, OFFLOAD_STATUS_UP,
OFFLOAD_STATUS_DOWN, OFFLOAD_STATUS_DOWN,
OFFLOAD_PORT_DOWN, OFFLOAD_PORT_DOWN,
OFFLOAD_PORT_UP OFFLOAD_PORT_UP,
OFFLOAD_DB_FULL,
OFFLOAD_DB_EMPTY,
OFFLOAD_DB_DROP
}; };
struct cxgb3_client { struct cxgb3_client {
......
...@@ -254,6 +254,22 @@ ...@@ -254,6 +254,22 @@
#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) #define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) #define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
#define S_HIPRIORITYDBFULL 7
#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
#define S_HIPRIORITYDBEMPTY 6
#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
#define S_LOPRIORITYDBFULL 5
#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
#define S_LOPRIORITYDBEMPTY 4
#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
#define S_RSPQDISABLED 3 #define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U) #define F_RSPQDISABLED V_RSPQDISABLED(1U)
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include "sge_defs.h" #include "sge_defs.h"
#include "t3_cpl.h" #include "t3_cpl.h"
#include "firmware_exports.h" #include "firmware_exports.h"
#include "cxgb3_offload.h"
#define USE_GTS 0 #define USE_GTS 0
...@@ -2833,8 +2834,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter) ...@@ -2833,8 +2834,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
} }
if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", queue_work(cxgb3_wq, &adapter->db_drop_task);
status & F_HIPIODRBDROPERR ? "high" : "lo");
if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
queue_work(cxgb3_wq, &adapter->db_full_task);
if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
queue_work(cxgb3_wq, &adapter->db_empty_task);
t3_write_reg(adapter, A_SG_INT_CAUSE, status); t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & SGE_FATALERR) if (status & SGE_FATALERR)
......
...@@ -1432,7 +1432,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg, ...@@ -1432,7 +1432,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
F_HIRCQPARITYERROR) F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
F_NFASRCHFAIL) F_NFASRCHFAIL)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment