Commit bc22ff53 authored by David S. Miller

Merge branch 'cxgb4-update-kconfig-and-fixes'

Hariprasad Shenai says:

====================
Update Kconfig and some fixes for cxgb4

This series updates Kconfig to add descriptions for Chelsio's next
generation T6 family of adapters. It also fixes ethtool stats alignment,
prevents simultaneous execution of the service_ofldq() thread, deals with
queue wrap-around, adds some free-list (FL) counters for debugging, and
adds device IDs for new T5 adapters.

This patch series has been created against the net-next tree and includes
patches for the cxgb4 driver.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any comments.

Thanks

V2: Declare 'service_ofldq_running' as bool in Patch 4/7 ("cxgb4: prevent
    simultaneous execution of service_ofldq()"), based on a review comment
    from David Miller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9cbe9fd5 76928c90
@@ -65,13 +65,14 @@ config CHELSIO_T3
           will be called cxgb3.
 
 config CHELSIO_T4
-        tristate "Chelsio Communications T4/T5 Ethernet support"
+        tristate "Chelsio Communications T4/T5/T6 Ethernet support"
         depends on PCI && (IPV6 || IPV6=n)
         select FW_LOADER
         select MDIO
         ---help---
-          This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
-          adapter and T5 based 40Gb Ethernet adapter.
+          This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
+          adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
+          Ethernet adapters.
 
           For general information about Chelsio and our products, visit
           our website at <http://www.chelsio.com>.
@@ -85,7 +86,7 @@ config CHELSIO_T4
           will be called cxgb4.
 
 config CHELSIO_T4_DCB
-        bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards"
+        bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards"
         default n
         depends on CHELSIO_T4 && DCB
         ---help---
@@ -107,12 +108,12 @@ config CHELSIO_T4_FCOE
           If unsure, say N.
 
 config CHELSIO_T4VF
-        tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
+        tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support"
         depends on PCI
         ---help---
-          This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
-          adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
-          Functions.
+          This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
+          adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
+          Ethernet adapters with PCI-E SR-IOV Virtual Functions.
 
           For general information about Chelsio and our products, visit
           our website at <http://www.chelsio.com>.
......
@@ -483,6 +483,8 @@ struct sge_fl {                     /* SGE free-buffer queue state */
         unsigned int pidx;          /* producer index */
         unsigned long alloc_failed; /* # of times buffer allocation failed */
         unsigned long large_alloc_failed;
+        unsigned long mapping_err;  /* # of RX Buffer DMA Mapping failures */
+        unsigned long low;          /* # of times momentarily starving */
         unsigned long starving;
         /* RO fields */
         unsigned int cntxt_id;      /* SGE context id for the free list */
@@ -618,6 +620,7 @@ struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
         struct adapter *adap;
         struct sk_buff_head sendq;  /* list of backpressured packets */
         struct tasklet_struct qresume_tsk; /* restarts the queue */
+        bool service_ofldq_running; /* service_ofldq() is processing sendq */
         u8 full;                    /* the Tx ring is full */
         unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
 } ____cacheline_aligned_in_smp;
......
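As context for the new service_ofldq_running field above, here is a minimal,
self-contained sketch of the single-runner pattern that such a flag enables.
This is an illustrative toy example under stated assumptions, not the driver's
actual code: the toy_* names are made up. The flag is read and written only
while the queue lock is held, so at most one thread ever drains the queue even
though the lock is dropped while each entry is handed to the hardware.

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Toy example only: a drain loop in which the "running" flag, protected
     * by the same lock as the pending list, guarantees a single servicing
     * thread even though the lock is released around the per-item work.
     */
    struct toy_item {
            struct list_head node;
    };

    struct toy_txq {
            spinlock_t lock;          /* protects "pending" and "running" */
            struct list_head pending; /* queued work items */
            bool running;             /* true while some thread is draining */
    };

    static void toy_service(struct toy_txq *q)  /* called with q->lock held */
    {
            struct toy_item *it;

            if (q->running)           /* another thread is already draining */
                    return;
            q->running = true;

            while (!list_empty(&q->pending)) {
                    it = list_first_entry(&q->pending, struct toy_item, node);

                    spin_unlock(&q->lock);  /* producers may keep queueing */
                    /* ... hand "it" to the hardware ring here ... */
                    spin_lock(&q->lock);

                    list_del(&it->node);    /* unlink only after it was sent */
            }

            q->running = false;       /* still under q->lock */
    }

The point of the design is that producers never block behind the hardware
writes; they only contend for the short critical sections that touch the list
and the flag.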
@@ -2325,6 +2325,8 @@ do { \
                 TL("TxMapErr:", mapping_err);
                 RL("FLAllocErr:", fl.alloc_failed);
                 RL("FLLrgAlcErr:", fl.large_alloc_failed);
+                RL("FLMapErr:", fl.mapping_err);
+                RL("FLLow:", fl.low);
                 RL("FLStarving:", fl.starving);
 
         } else if (iscsi_idx < iscsi_entries) {
@@ -2359,6 +2361,8 @@ do { \
                 RL("RxNoMem:", stats.nomem);
                 RL("FLAllocErr:", fl.alloc_failed);
                 RL("FLLrgAlcErr:", fl.large_alloc_failed);
+                RL("FLMapErr:", fl.mapping_err);
+                RL("FLLow:", fl.low);
                 RL("FLStarving:", fl.starving);
 
         } else if (rdma_idx < rdma_entries) {
@@ -2388,6 +2392,8 @@ do { \
                 RL("RxNoMem:", stats.nomem);
                 RL("FLAllocErr:", fl.alloc_failed);
                 RL("FLLrgAlcErr:", fl.large_alloc_failed);
+                RL("FLMapErr:", fl.mapping_err);
+                RL("FLLow:", fl.low);
                 RL("FLStarving:", fl.starving);
 
         } else if (ciq_idx < ciq_entries) {
......
@@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val)
 }
 
 static const char stats_strings[][ETH_GSTRING_LEN] = {
         "tx_octets_ok ",
         "tx_frames_ok ",
         "tx_broadcast_frames ",
         "tx_multicast_frames ",
         "tx_unicast_frames ",
         "tx_error_frames ",
         "tx_frames_64 ",
         "tx_frames_65_to_127 ",
         "tx_frames_128_to_255 ",
         "tx_frames_256_to_511 ",
         "tx_frames_512_to_1023 ",
         "tx_frames_1024_to_1518 ",
         "tx_frames_1519_to_max ",
         "tx_frames_dropped ",
         "tx_pause_frames ",
         "tx_ppp0_frames ",
         "tx_ppp1_frames ",
         "tx_ppp2_frames ",
         "tx_ppp3_frames ",
         "tx_ppp4_frames ",
         "tx_ppp5_frames ",
         "tx_ppp6_frames ",
         "tx_ppp7_frames ",
         "rx_octets_ok ",
         "rx_frames_ok ",
         "rx_broadcast_frames ",
         "rx_multicast_frames ",
         "rx_unicast_frames ",
         "rx_frames_too_long ",
         "rx_jabber_errors ",
         "rx_fcs_errors ",
         "rx_length_errors ",
         "rx_symbol_errors ",
         "rx_runt_frames ",
         "rx_frames_64 ",
         "rx_frames_65_to_127 ",
         "rx_frames_128_to_255 ",
         "rx_frames_256_to_511 ",
         "rx_frames_512_to_1023 ",
         "rx_frames_1024_to_1518 ",
         "rx_frames_1519_to_max ",
         "rx_pause_frames ",
         "rx_ppp0_frames ",
         "rx_ppp1_frames ",
         "rx_ppp2_frames ",
         "rx_ppp3_frames ",
         "rx_ppp4_frames ",
         "rx_ppp5_frames ",
         "rx_ppp6_frames ",
         "rx_ppp7_frames ",
         "rx_bg0_frames_dropped ",
         "rx_bg1_frames_dropped ",
         "rx_bg2_frames_dropped ",
         "rx_bg3_frames_dropped ",
         "rx_bg0_frames_trunc ",
         "rx_bg1_frames_trunc ",
         "rx_bg2_frames_trunc ",
         "rx_bg3_frames_trunc ",
         "tso ",
         "tx_csum_offload ",
         "rx_csum_good ",
         "vlan_extractions ",
         "vlan_insertions ",
         "gro_packets ",
         "gro_merged ",
 };
 
 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
......
@@ -406,7 +406,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
  */
 static inline int reclaimable(const struct sge_txq *q)
 {
-        int hw_cidx = ntohs(q->stat->cidx);
+        int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
 
         hw_cidx -= q->cidx;
         return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
 }
@@ -613,6 +613,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                                        PCI_DMA_FROMDEVICE);
                 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
                         __free_pages(pg, s->fl_pg_order);
+                        q->mapping_err++;
                         goto out;   /* do not try small pages for this error */
                 }
                 mapping |= RX_LARGE_PG_BUF;
@@ -642,6 +643,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                                        PCI_DMA_FROMDEVICE);
                 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
                         put_page(pg);
+                        q->mapping_err++;
                         goto out;
                 }
                 *d++ = cpu_to_be64(mapping);
@@ -663,6 +665,7 @@ out:    cred = q->avail - cred;
         if (unlikely(fl_starving(adap, q))) {
                 smp_wmb();
+                q->low++;
                 set_bit(q->cntxt_id - adap->sge.egr_start,
                         adap->sge.starving_fl);
         }
@@ -1029,6 +1032,30 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
                 *p = 0;
 }
 
+static void *inline_tx_skb_header(const struct sk_buff *skb,
+                                  const struct sge_txq *q, void *pos,
+                                  int length)
+{
+        u64 *p;
+        int left = (void *)q->stat - pos;
+
+        if (likely(length <= left)) {
+                memcpy(pos, skb->data, length);
+                pos += length;
+        } else {
+                memcpy(pos, skb->data, left);
+                memcpy(q->desc, skb->data + left, length - left);
+                pos = (void *)q->desc + (length - left);
+        }
+        /* 0-pad to multiple of 16 */
+        p = PTR_ALIGN(pos, 8);
+        if ((uintptr_t)p & 8) {
+                *p = 0;
+                return p + 1;
+        }
+        return p;
+}
+
 /*
  * Figure out what HW csum a packet wants and return the appropriate control
  * bits.
@@ -1320,7 +1347,7 @@ out_free:       dev_kfree_skb_any(skb);
  */
 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
 {
-        int hw_cidx = ntohs(q->stat->cidx);
+        int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
         int reclaim = hw_cidx - q->cidx;
 
         if (reclaim < 0)
@@ -1542,24 +1569,50 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
 }
 
 /**
- *      service_ofldq - restart a suspended offload queue
+ *      service_ofldq - service/restart a suspended offload queue
  *      @q: the offload queue
  *
- *      Services an offload Tx queue by moving packets from its packet queue
- *      to the HW Tx ring.  The function starts and ends with the queue locked.
+ *      Services an offload Tx queue by moving packets from its Pending Send
+ *      Queue to the Hardware TX ring.  The function starts and ends with the
+ *      Send Queue locked, but drops the lock while putting the skb at the
+ *      head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
+ *      allows more skbs to be added to the Send Queue by other threads.
+ *      The packet being processed at the head of the Pending Send Queue is
+ *      left on the queue in case we experience DMA Mapping errors, etc.
+ *      and need to give up and restart later.
+ *
+ *      service_ofldq() can be thought of as a task which opportunistically
+ *      uses other threads' execution contexts.  We use the Offload Queue
+ *      boolean "service_ofldq_running" to make sure that only one instance
+ *      is ever running at a time ...
  */
 static void service_ofldq(struct sge_ofld_txq *q)
 {
-        u64 *pos;
+        u64 *pos, *before, *end;
         int credits;
         struct sk_buff *skb;
+        struct sge_txq *txq;
+        unsigned int left;
         unsigned int written = 0;
         unsigned int flits, ndesc;
 
+        /* If another thread is currently in service_ofldq() processing the
+         * Pending Send Queue then there's nothing to do. Otherwise, flag
+         * that we're doing the work and continue.  Examining/modifying
+         * the Offload Queue boolean "service_ofldq_running" must be done
+         * while holding the Pending Send Queue Lock.
+         */
+        if (q->service_ofldq_running)
+                return;
+        q->service_ofldq_running = true;
+
         while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
-                /*
-                 * We drop the lock but leave skb on sendq, thus retaining
-                 * exclusive access to the state of the queue.
+                /* We drop the lock while we're working with the skb at the
+                 * head of the Pending Send Queue.  This allows more skbs to
+                 * be added to the Pending Send Queue while we're working on
+                 * this one.  We don't need to lock to guard the TX Ring
+                 * updates because only one thread of execution is ever
+                 * allowed into service_ofldq() at a time.
                  */
                 spin_unlock(&q->sendq.lock);
@@ -1583,9 +1636,32 @@ static void service_ofldq(struct sge_ofld_txq *q)
                 } else {
                         int last_desc, hdr_len = skb_transport_offset(skb);
 
-                        memcpy(pos, skb->data, hdr_len);
-                        write_sgl(skb, &q->q, (void *)pos + hdr_len,
-                                  pos + flits, hdr_len,
+                        /* The WR headers may not fit within one descriptor.
+                         * So we need to deal with wrap-around here.
+                         */
+                        before = (u64 *)pos;
+                        end = (u64 *)pos + flits;
+                        txq = &q->q;
+                        pos = (void *)inline_tx_skb_header(skb, &q->q,
+                                                           (void *)pos,
+                                                           hdr_len);
+                        if (before > (u64 *)pos) {
+                                left = (u8 *)end - (u8 *)txq->stat;
+                                end = (void *)txq->desc + left;
+                        }
+
+                        /* If current position is already at the end of the
+                         * ofld queue, reset the current to point to
+                         * start of the queue and update the end ptr as well.
+                         */
+                        if (pos == (u64 *)txq->stat) {
+                                left = (u8 *)end - (u8 *)txq->stat;
+                                end = (void *)txq->desc + left;
+                                pos = (void *)txq->desc;
+                        }
+
+                        write_sgl(skb, &q->q, (void *)pos,
+                                  end, hdr_len,
                                   (dma_addr_t *)skb->head);
 #ifdef CONFIG_NEED_DMA_MAP_STATE
                         skb->dev = q->adap->port[0];
@@ -1604,6 +1680,11 @@ static void service_ofldq(struct sge_ofld_txq *q)
                         written = 0;
                 }
 
+                /* Reacquire the Pending Send Queue Lock so we can unlink the
+                 * skb we've just successfully transferred to the TX Ring and
+                 * loop for the next skb which may be at the head of the
+                 * Pending Send Queue.
+                 */
                 spin_lock(&q->sendq.lock);
                 __skb_unlink(skb, &q->sendq);
                 if (is_ofld_imm(skb))
@@ -1611,6 +1692,11 @@ static void service_ofldq(struct sge_ofld_txq *q)
         }
         if (likely(written))
                 ring_tx_db(q->adap, &q->q, written);
+
+        /* Indicate that no thread is processing the Pending Send Queue
+         * currently.
+         */
+        q->service_ofldq_running = false;
 }
 
 /**
@@ -1624,9 +1710,19 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
 {
         skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
         spin_lock(&q->sendq.lock);
+
+        /* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
+         * that results in this new skb being the only one on the queue, start
+         * servicing it.  If there are other skbs already on the list, then
+         * either the queue is currently being processed or it's been stopped
+         * for some reason and it'll be restarted at a later time.  Restart
+         * paths are triggered by events like experiencing a DMA Mapping Error
+         * or filling the Hardware TX Ring.
+         */
         __skb_queue_tail(&q->sendq, skb);
         if (q->sendq.qlen == 1)
                 service_ofldq(q);
+
         spin_unlock(&q->sendq.lock);
         return NET_XMIT_SUCCESS;
 }
......
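To make the wrap-around handling in the hunks above easier to follow, here is
a minimal, self-contained user-space sketch of the same idea that
inline_tx_skb_header() implements: copy as much as fits before the end of the
ring, then continue from the start. The ring_copy() helper and its plain byte
buffer are illustrative stand-ins (assumed names), not the driver's descriptor
structures, and the caller is assumed to guarantee len <= ring_size.

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: copy "len" bytes into a circular byte buffer of
     * size "ring_size", starting at offset "pos" and wrapping to offset 0
     * when the data does not fit before the end -- the same idea
     * inline_tx_skb_header() uses when it wraps from the end of the
     * descriptor ring (q->stat) back to its start (q->desc).
     * Returns the new write offset.
     */
    static size_t ring_copy(uint8_t *ring, size_t ring_size, size_t pos,
                            const uint8_t *src, size_t len)
    {
            size_t left = ring_size - pos;  /* room before the end of the ring */

            if (len <= left) {
                    memcpy(ring + pos, src, len);
                    pos += len;
            } else {
                    memcpy(ring + pos, src, left);         /* fill up to the end */
                    memcpy(ring, src + left, len - left);  /* continue at the start */
                    pos = len - left;
            }
            return pos == ring_size ? 0 : pos;
    }

The driver's version additionally zero-pads the copied header out to a
multiple of 16 bytes (per its "0-pad to multiple of 16" comment) so that
whatever follows stays aligned in the descriptor ring.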
@@ -162,6 +162,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
         CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
         CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
         CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
+        CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
+        CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
+        CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
 
         /* T6 adapters:
          */
......