Commit 80f61f19 authored by Arjun Vynipadath, committed by David S. Miller

cxgb4/chtls: Prefix adapter flags with CXGB4

Some of these macros were conflicting with the global namespace,
hence prefix them with CXGB4.
Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 29ca1c5a
...@@ -616,7 +616,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk) ...@@ -616,7 +616,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
pi = netdev_priv(ndev); pi = netdev_priv(ndev);
adap = pi->adapter; adap = pi->adapter;
if (!(adap->flags & FULL_INIT_DONE)) if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EBADF; return -EBADF;
if (listen_hash_find(cdev, sk) >= 0) /* already have it */ if (listen_hash_find(cdev, sk) >= 0) /* already have it */
......
...@@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_init *pdbg_init) ...@@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_init *pdbg_init)
{ {
struct adapter *padap = pdbg_init->adap; struct adapter *padap = pdbg_init->adap;
if (!(padap->flags & FW_OK) || padap->use_bd) if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
return 0; return 0;
return 1; return 1;
......
...@@ -606,18 +606,18 @@ struct dentry; ...@@ -606,18 +606,18 @@ struct dentry;
struct work_struct; struct work_struct;
enum { /* adapter flags */ enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0), CXGB4_FULL_INIT_DONE = (1 << 0),
DEV_ENABLED = (1 << 1), CXGB4_DEV_ENABLED = (1 << 1),
USING_MSI = (1 << 2), CXGB4_USING_MSI = (1 << 2),
USING_MSIX = (1 << 3), CXGB4_USING_MSIX = (1 << 3),
FW_OK = (1 << 4), CXGB4_FW_OK = (1 << 4),
RSS_TNLALLLOOKUP = (1 << 5), CXGB4_RSS_TNLALLLOOKUP = (1 << 5),
USING_SOFT_PARAMS = (1 << 6), CXGB4_USING_SOFT_PARAMS = (1 << 6),
MASTER_PF = (1 << 7), CXGB4_MASTER_PF = (1 << 7),
FW_OFLD_CONN = (1 << 9), CXGB4_FW_OFLD_CONN = (1 << 9),
ROOT_NO_RELAXED_ORDERING = (1 << 10), CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10),
SHUTTING_DOWN = (1 << 11), CXGB4_SHUTTING_DOWN = (1 << 11),
SGE_DBQ_TIMER = (1 << 12), CXGB4_SGE_DBQ_TIMER = (1 << 12),
}; };
enum { enum {
......
...@@ -3143,7 +3143,7 @@ static int tid_info_show(struct seq_file *seq, void *v) ...@@ -3143,7 +3143,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, ", in use: %u/%u\n", seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use), atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use)); atomic_read(&t->hash_tids_in_use));
} else if (adap->flags & FW_OFLD_CONN) { } else if (adap->flags & CXGB4_FW_OFLD_CONN) {
seq_printf(seq, "TID range: %u..%u/%u..%u", seq_printf(seq, "TID range: %u..%u/%u..%u",
t->aftid_base, t->aftid_base,
t->aftid_end, t->aftid_end,
......
...@@ -875,7 +875,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) ...@@ -875,7 +875,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL; return -EINVAL;
if (adapter->flags & FULL_INIT_DONE) if (adapter->flags & CXGB4_FULL_INIT_DONE)
return -EBUSY; return -EBUSY;
for (i = 0; i < pi->nqsets; ++i) { for (i = 0; i < pi->nqsets; ++i) {
...@@ -940,7 +940,7 @@ static int get_dbqtimer_tick(struct net_device *dev) ...@@ -940,7 +940,7 @@ static int get_dbqtimer_tick(struct net_device *dev)
struct port_info *pi = netdev_priv(dev); struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter; struct adapter *adap = pi->adapter;
if (!(adap->flags & SGE_DBQ_TIMER)) if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0; return 0;
return adap->sge.dbqtimer_tick; return adap->sge.dbqtimer_tick;
...@@ -957,7 +957,7 @@ static int get_dbqtimer(struct net_device *dev) ...@@ -957,7 +957,7 @@ static int get_dbqtimer(struct net_device *dev)
txq = &adap->sge.ethtxq[pi->first_qset]; txq = &adap->sge.ethtxq[pi->first_qset];
if (!(adap->flags & SGE_DBQ_TIMER)) if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0; return 0;
/* all of the TX Queues use the same Timer Index */ /* all of the TX Queues use the same Timer Index */
...@@ -979,7 +979,7 @@ static int set_dbqtimer_tick(struct net_device *dev, int usecs) ...@@ -979,7 +979,7 @@ static int set_dbqtimer_tick(struct net_device *dev, int usecs)
u32 param, val; u32 param, val;
int ret; int ret;
if (!(adap->flags & SGE_DBQ_TIMER)) if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0; return 0;
/* return early if it's the same Timer Tick we're already using */ /* return early if it's the same Timer Tick we're already using */
...@@ -1015,7 +1015,7 @@ static int set_dbqtimer(struct net_device *dev, int usecs) ...@@ -1015,7 +1015,7 @@ static int set_dbqtimer(struct net_device *dev, int usecs)
u32 param, val; u32 param, val;
int ret; int ret;
if (!(adap->flags & SGE_DBQ_TIMER)) if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0; return 0;
/* Find the SGE Doorbell Timer Value that's closest to the requested /* Find the SGE Doorbell Timer Value that's closest to the requested
...@@ -1042,7 +1042,7 @@ static int set_dbqtimer(struct net_device *dev, int usecs) ...@@ -1042,7 +1042,7 @@ static int set_dbqtimer(struct net_device *dev, int usecs)
return 0; return 0;
for (qix = 0; qix < pi->nqsets; qix++, txq++) { for (qix = 0; qix < pi->nqsets; qix++, txq++) {
if (adap->flags & FULL_INIT_DONE) { if (adap->flags & CXGB4_FULL_INIT_DONE) {
param = param =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
...@@ -1263,7 +1263,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) ...@@ -1263,7 +1263,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
* firmware image otherwise we'll try to do the entire job from the * firmware image otherwise we'll try to do the entire job from the
* host ... and we always "force" the operation in this path. * host ... and we always "force" the operation in this path.
*/ */
if (adap->flags & FULL_INIT_DONE) if (adap->flags & CXGB4_FULL_INIT_DONE)
mbox = adap->mbox; mbox = adap->mbox;
ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
...@@ -1342,7 +1342,7 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, ...@@ -1342,7 +1342,7 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
return 0; return 0;
/* Interface must be brought up atleast once */ /* Interface must be brought up atleast once */
if (pi->adapter->flags & FULL_INIT_DONE) { if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++) for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i]; pi->rss[i] = p[i];
......
...@@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *netdev) ...@@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
if (is_t4(adap->params.chip)) if (is_t4(adap->params.chip))
return -EINVAL; return -EINVAL;
if (!(adap->flags & FULL_INIT_DONE)) if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EINVAL; return -EINVAL;
dev_info(adap->pdev_dev, "Enabling FCoE offload features\n"); dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
......
...@@ -524,7 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx) ...@@ -524,7 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM; return -ENOMEM;
fwr = __skb_put(skb, len); fwr = __skb_put(skb, len);
t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1 t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
: adapter->sge.fw_evtq.abs_id); : adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request. /* Mark the filter as "pending" and ship off the Filter Work Request.
...@@ -1569,7 +1569,7 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id, ...@@ -1569,7 +1569,7 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id,
int ret; int ret;
/* If we are shutting down the adapter do not wait for completion */ /* If we are shutting down the adapter do not wait for completion */
if (netdev2adap(dev)->flags & SHUTTING_DOWN) if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
return __cxgb4_del_filter(dev, filter_id, fs, NULL); return __cxgb4_del_filter(dev, filter_id, fs, NULL);
init_completion(&ctx.completion); init_completion(&ctx.completion);
......
...@@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter *adap, ...@@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
per_chan = rxq_info->nrxq / adap->params.nports; per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX) if (adap->flags & CXGB4_USING_MSIX)
msi_idx = 1; msi_idx = 1;
else else
msi_idx = -((int)s->intrq.abs_id + 1); msi_idx = -((int)s->intrq.abs_id + 1);
...@@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) ...@@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0; int i, ret = 0;
if (adap->flags & USING_MSIX) { if (adap->flags & CXGB4_USING_MSIX) {
rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq), rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
sizeof(unsigned short), sizeof(unsigned short),
GFP_KERNEL); GFP_KERNEL);
...@@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) ...@@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */ /* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE && if (adap->flags & CXGB4_FULL_INIT_DONE &&
!ret && uld_type == CXGB4_ULD_RDMA) { !ret && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
unsigned int cmplqid; unsigned int cmplqid;
...@@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) ...@@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{ {
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) { if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
u32 param, cmdop, cmplqid = 0; u32 param, cmdop, cmplqid = 0;
int i; int i;
...@@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) ...@@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq, t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq); rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
if (adap->flags & USING_MSIX) if (adap->flags & CXGB4_USING_MSIX)
kfree(rxq_info->msix_tbl); kfree(rxq_info->msix_tbl);
} }
...@@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, ...@@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!rxq_info) if (!rxq_info)
return -ENOMEM; return -ENOMEM;
if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) { if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
i = s->nqs_per_uld; i = s->nqs_per_uld;
rxq_info->nrxq = roundup(i, adap->params.nports); rxq_info->nrxq = roundup(i, adap->params.nports);
} else { } else {
...@@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, ...@@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!uld_info->ciq) { if (!uld_info->ciq) {
rxq_info->nciq = 0; rxq_info->nciq = 0;
} else { } else {
if (adap->flags & USING_MSIX) if (adap->flags & CXGB4_USING_MSIX)
rxq_info->nciq = min_t(int, s->nqs_per_uld, rxq_info->nciq = min_t(int, s->nqs_per_uld,
num_online_cpus()); num_online_cpus());
else else
...@@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type ...@@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type
adap->uld[type].add = NULL; adap->uld[type].add = NULL;
release_sge_txq_uld(adap, type); release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE) if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type); quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX) if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type); free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type); free_sge_queues_uld(adap, type);
...@@ -673,7 +673,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) ...@@ -673,7 +673,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->sge_egrstatuspagesize = adap->sge.stat_len; lld->sge_egrstatuspagesize = adap->sge.stat_len;
lld->sge_pktshift = adap->sge.pktshift; lld->sge_pktshift = adap->sge.pktshift;
lld->ulp_crypto = adap->params.crypto; lld->ulp_crypto = adap->params.crypto;
lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
lld->max_ordird_qp = adap->params.max_ordird_qp; lld->max_ordird_qp = adap->params.max_ordird_qp;
lld->max_ird_adapter = adap->params.max_ird_adapter; lld->max_ird_adapter = adap->params.max_ird_adapter;
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
...@@ -702,7 +702,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) ...@@ -702,7 +702,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
adap->uld[uld].handle = handle; adap->uld[uld].handle = handle;
t4_register_netevent_notifier(); t4_register_netevent_notifier();
if (adap->flags & FULL_INIT_DONE) if (adap->flags & CXGB4_FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP); adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
} }
...@@ -737,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld type, ...@@ -737,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld type,
ret = setup_sge_queues_uld(adap, type, p->lro); ret = setup_sge_queues_uld(adap, type, p->lro);
if (ret) if (ret)
goto free_queues; goto free_queues;
if (adap->flags & USING_MSIX) { if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs_uld(adap, type); name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type); ret = request_msix_queue_irqs_uld(adap, type);
if (ret) if (ret)
goto free_rxq; goto free_rxq;
} }
if (adap->flags & FULL_INIT_DONE) if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type); enable_rx_uld(adap, type);
if (adap->uld[type].add) if (adap->uld[type].add)
goto free_irq; goto free_irq;
...@@ -754,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld type, ...@@ -754,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld type,
uld_attach(adap, type); uld_attach(adap, type);
continue; continue;
free_irq: free_irq:
if (adap->flags & FULL_INIT_DONE) if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type); quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX) if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type); free_msix_queue_irqs_uld(adap, type);
free_rxq: free_rxq:
free_sge_queues_uld(adap, type); free_sge_queues_uld(adap, type);
......
...@@ -3375,7 +3375,7 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie) ...@@ -3375,7 +3375,7 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie)
{ {
struct adapter *adap = cookie; struct adapter *adap = cookie;
if (adap->flags & MASTER_PF) if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap); t4_slow_intr_handler(adap);
process_intrq(adap); process_intrq(adap);
return IRQ_HANDLED; return IRQ_HANDLED;
...@@ -3391,7 +3391,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie) ...@@ -3391,7 +3391,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
struct adapter *adap = cookie; struct adapter *adap = cookie;
t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
process_intrq(adap)) process_intrq(adap))
return IRQ_HANDLED; return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */ return IRQ_NONE; /* probably shared interrupt */
...@@ -3406,9 +3406,9 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie) ...@@ -3406,9 +3406,9 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
*/ */
irq_handler_t t4_intr_handler(struct adapter *adap) irq_handler_t t4_intr_handler(struct adapter *adap)
{ {
if (adap->flags & USING_MSIX) if (adap->flags & CXGB4_USING_MSIX)
return t4_sge_intr_msix; return t4_sge_intr_msix;
if (adap->flags & USING_MSI) if (adap->flags & CXGB4_USING_MSI)
return t4_intr_msi; return t4_intr_msi;
return t4_intr_intx; return t4_intr_intx;
} }
...@@ -3441,7 +3441,7 @@ static void sge_rx_timer_cb(struct timer_list *t) ...@@ -3441,7 +3441,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
* global Master PF activities like checking for chip ingress stalls, * global Master PF activities like checking for chip ingress stalls,
* etc. * etc.
*/ */
if (!(adap->flags & MASTER_PF)) if (!(adap->flags & CXGB4_MASTER_PF))
goto done; goto done;
t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
...@@ -3549,7 +3549,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, ...@@ -3549,7 +3549,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct fw_iq_cmd c; struct fw_iq_cmd c;
struct sge *s = &adap->sge; struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev); struct port_info *pi = netdev_priv(dev);
int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */ /* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16); iq->size = roundup(iq->size, 16);
......
...@@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_device *dev, ...@@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
adap = netdev2adap(dev); adap = netdev2adap(dev);
s = adap->srq; s = adap->srq;
if (!(adap->flags & FULL_INIT_DONE) || !s) if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
goto out; goto out;
skb = alloc_skb(sizeof(*req), GFP_KERNEL); skb = alloc_skb(sizeof(*req), GFP_KERNEL);
......
...@@ -198,7 +198,7 @@ static void t4_report_fw_error(struct adapter *adap) ...@@ -198,7 +198,7 @@ static void t4_report_fw_error(struct adapter *adap)
if (pcie_fw & PCIE_FW_ERR_F) { if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]); reason[PCIE_FW_EVAL_G(pcie_fw)]);
adap->flags &= ~FW_OK; adap->flags &= ~CXGB4_FW_OK;
} }
} }
...@@ -5243,7 +5243,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map) ...@@ -5243,7 +5243,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap) static unsigned int t4_use_ldst(struct adapter *adap)
{ {
return (adap->flags & FW_OK) && !adap->use_bd; return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
} }
/** /**
...@@ -6132,7 +6132,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx) ...@@ -6132,7 +6132,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
* ( MPSBGMAP[Port 1] << 8 ) | * ( MPSBGMAP[Port 1] << 8 ) |
* ( MPSBGMAP[Port 0] << 0 )) * ( MPSBGMAP[Port 0] << 0 ))
*/ */
if (adapter->flags & FW_OK) { if (adapter->flags & CXGB4_FW_OK) {
u32 param, val; u32 param, val;
int ret; int ret;
...@@ -7093,10 +7093,10 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, ...@@ -7093,10 +7093,10 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr)) if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL; return -EINVAL;
/* Disable FW_OK flag so that mbox commands with FW_OK flag set /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
* wont be sent when we are flashing FW. * set wont be sent when we are flashing FW.
*/ */
adap->flags &= ~FW_OK; adap->flags &= ~CXGB4_FW_OK;
ret = t4_fw_halt(adap, mbox, force); ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force) if (ret < 0 && !force)
...@@ -7135,7 +7135,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, ...@@ -7135,7 +7135,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
*/ */
(void)t4_init_devlog_params(adap); (void)t4_init_devlog_params(adap);
out: out:
adap->flags |= FW_OK; adap->flags |= CXGB4_FW_OK;
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment