Commit bc0a9a4b authored by David S. Miller

Merge branch 'cxgb4-next'

Hariprasad Shenai says:

====================
Misc. fixes for cxgb4

This patch series provides miscellaneous fixes for the cxgb4 driver for
Chelsio T4/T5 adapters, related to the SGE and MTU.

It also fixes a regression in the LSO length calculation path
("cxgb4: Calculate len properly for LSO path").

The patch series is created against David Miller's 'net-next' tree
and includes patches for the cxgb4 driver.

We request that this patch series be merged via David Miller's
'net-next' tree.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any review comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 433131ba ca71de6b
@@ -556,8 +556,13 @@ struct sge {
u32 pktshift; /* padding between CPL & packet data */
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
unsigned int starve_thres;
u8 idma_state[2];
/* State variables for detecting an SGE Ingress DMA hang */
unsigned int idma_1s_thresh; /* SGE same State Counter 1s threshold */
unsigned int idma_stalled[2]; /* SGE synthesized stalled timers in HZ */
unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
unsigned int egr_start;
unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
@@ -1032,4 +1037,5 @@ void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
#endif /* __CXGB4_H__ */
@@ -93,6 +93,16 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
* (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
* State Machines in the same state for this amount of time (in HZ) then we'll
* issue a warning about a potential hang. We'll repeat the warning as the
* SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
* the situation clears. If the situation clears, we'll note that as well.
*/
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
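Taken together with the stall-timer synthesis in sge_rx_timer_cb() below, these two constants give a first warning after one second of an unchanged IDMA state and a repeat warning every SGE_IDMA_WARN_REPEAT ticks while the stall persists. Here is a minimal userspace sketch of that cadence; HZ = 1000 and RX_QCHECK_PERIOD = HZ/2 are assumptions for illustration only (RX_QCHECK_PERIOD is defined elsewhere in sge.c):

```c
#include <stdio.h>

#define HZ                   1000       /* assumed tick rate */
#define RX_QCHECK_PERIOD     (HZ / 2)   /* assumed; mirrors TX_QCHECK_PERIOD above */
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)

int main(void)
{
	unsigned int idma_stalled = 0;
	int tick;

	/* Simulate 45 RX timer callbacks during one continuous stall. */
	for (tick = 0; tick < 45; tick++) {
		/* First detection seeds the timer at HZ; later callbacks
		 * advance it by one RX check period, as the driver does.
		 */
		idma_stalled = idma_stalled ? idma_stalled + RX_QCHECK_PERIOD
					    : HZ;
		if (idma_stalled < SGE_IDMA_WARN_THRESH)
			continue;
		if (((idma_stalled - HZ) % SGE_IDMA_WARN_REPEAT) == 0)
			printf("warning after %u seconds of stall\n",
			       idma_stalled / HZ);
	}
	return 0;
}
```

With these assumed values the sketch warns after 1 second and then again at 11 and 21 seconds, i.e. every ten seconds until the stall clears.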
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
@@ -1041,7 +1051,6 @@ out_free: dev_kfree_skb(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
len += sizeof(*cpl);
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1069,6 +1078,7 @@ out_free: dev_kfree_skb(skb);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
len += sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
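This pair of hunks is the LSO regression fix named in the cover letter: `len += sizeof(*cpl)` moves out of the common path and into the non-GSO branch, so the core CPL header is counted in the immediate length only for plain packets, while the LSO branch accounts for its own, larger header. A toy sketch of the corrected accounting; the sizes and the immdlen() helper are made up for illustration (the real structs are cpl_tx_pkt_core and cpl_tx_pkt_lso):

```c
#include <stdio.h>

/* Hypothetical header sizes for illustration only; the driver uses
 * sizeof() on the real CPL structures.
 */
#define CPL_CORE_LEN 16
#define CPL_LSO_LEN  48  /* assumed: LSO header embeds the core CPL */

static unsigned int immdlen(int gso, int immediate, unsigned int skb_len)
{
	unsigned int len = immediate ? skb_len : 0;

	if (gso)
		len += CPL_LSO_LEN;   /* LSO path sizes its own header */
	else
		len += CPL_CORE_LEN;  /* the fix: core CPL added here only */
	return len;
}

int main(void)
{
	printf("plain: %u, lso: %u\n", immdlen(0, 1, 64), immdlen(1, 0, 64));
	return 0;
}
```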
@@ -2008,7 +2018,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
unsigned int i, cnt[2];
unsigned int i, idma_same_state_cnt[2];
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2031,21 +2041,64 @@ static void sge_rx_timer_cb(unsigned long data)
}
t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
for (i = 0; i < 2; i++)
if (cnt[i] >= s->starve_thres) {
if (s->idma_state[i] || cnt[i] == 0xffffffff)
continue;
s->idma_state[i] = 1;
t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
dev_warn(adap->pdev_dev,
"SGE idma%u starvation detected for "
"queue %lu\n", i, m & 0xffff);
} else if (s->idma_state[i])
s->idma_state[i] = 0;
idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
for (i = 0; i < 2; i++) {
u32 debug0, debug11;
/* If the Ingress DMA Same State Counter ("timer") is less
* than 1s, then we can reset our synthesized Stall Timer and
* continue. If we have previously emitted warnings about a
* potential stalled Ingress Queue, issue a note indicating
* that the Ingress Queue has resumed forward progress.
*/
if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
i, s->idma_qid[i],
s->idma_stalled[i]/HZ);
s->idma_stalled[i] = 0;
continue;
}
/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
* domain. The first time we get here it'll be because we
* passed the 1s Threshold; each additional time it'll be
* because the RX Timer Callback is being fired on its regular
* schedule.
*
* If the stall is below our Potential Hung Ingress Queue
* Warning Threshold, continue.
*/
if (s->idma_stalled[i] == 0)
s->idma_stalled[i] = HZ;
else
s->idma_stalled[i] += RX_QCHECK_PERIOD;
if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
continue;
/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
continue;
/* Read and save the SGE IDMA State and Queue ID information.
* We do this every time in case it changes across time ...
*/
t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
i, s->idma_qid[i], s->idma_state[i],
s->idma_stalled[i]/HZ, debug0, debug11);
t4_sge_decode_idma_state(adap, s->idma_state[i]);
}
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
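Throughout the callback, the SGE debug bus is read indirectly: write an index into SGE_DEBUG_INDEX, then read the selected word back through SGE_DEBUG_DATA_HIGH/SGE_DEBUG_DATA_LOW. A hypothetical helper (not part of the driver) capturing that access pattern with the driver's existing register accessors:

```c
/* Hypothetical helper, not in the driver: select an SGE debug bus
 * index, then read the 64-bit value back through the two data
 * registers, exactly as sge_rx_timer_cb() does inline above.
 */
static u64 read_sge_debug(struct adapter *adap, u32 index)
{
	t4_write_reg(adap, SGE_DEBUG_INDEX, index);
	return ((u64)t4_read_reg(adap, SGE_DEBUG_DATA_HIGH) << 32) |
	       t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
}
```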
@@ -2596,11 +2649,19 @@ static int t4_sge_init_soft(struct adapter *adap)
fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
/* We only bother using the Large Page logic if the Large Page Buffer
* is larger than our Page Size Buffer.
*/
if (fl_large_pg <= fl_small_pg)
fl_large_pg = 0;
#undef READ_FL_BUF
/* The Page Size Buffer must be exactly equal to our Page Size and the
* Large Page Size Buffer should be 0 (per above) or a power of 2.
*/
if (fl_small_pg != PAGE_SIZE ||
(fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
(fl_large_pg & (fl_large_pg-1)) != 0))) {
(fl_large_pg & (fl_large_pg-1)) != 0) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
return -EINVAL;
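The rewritten condition relies on the classic x & (x - 1) trick: the expression clears the lowest set bit, so it evaluates to zero exactly when x is zero or a power of two. Since fl_large_pg was already forced to 0 above whenever it is not strictly larger than fl_small_pg, the whole check reduces to "page size buffer equals PAGE_SIZE, large buffer is zero or a power of two". A standalone illustration (the kernel's own is_power_of_2() in log2.h performs the same test for nonzero values):

```c
#include <stdio.h>

/* Zero or a power of two: the same test the rewritten condition uses. */
static int pow2_or_zero(unsigned long x)
{
	return (x & (x - 1)) == 0;
}

int main(void)
{
	unsigned long vals[] = { 0, 4096, 65536, 12288 };
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%lu -> %s\n", vals[i],
		       pow2_or_zero(vals[i]) ? "ok" : "bad");
	return 0;
}
```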
@@ -2715,8 +2776,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
u32 sge_control;
int ret;
u32 sge_control, sge_conm_ctrl;
int ret, egress_threshold;
/*
* Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2741,15 +2802,24 @@ int t4_sge_init(struct adapter *adap)
* SGE's Egress Congestion Threshold. If it isn't, then we can get
* stuck waiting for new packets while the SGE is waiting for us to
* give it more Free List entries. (Note that the SGE's Egress
* Congestion Threshold is in units of 2 Free List pointers.)
* Congestion Threshold is in units of 2 Free List pointers.) For T4,
* there was only a single field to control this. For T5 there's the
* original field which now only applies to Unpacked Mode Free List
* buffers and a new field which only applies to Packed Mode Free List
* buffers.
*/
s->fl_starve_thres
= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
if (is_t4(adap->params.chip))
egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
else
egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
s->idma_state[0] = s->idma_state[1] = 0;
s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
s->idma_stalled[0] = 0;
s->idma_stalled[1] = 0;
spin_lock_init(&s->intrq_lock);
return 0;
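To see the T5 path concretely: EGRTHRESHOLDPACKING_GET(), added to t4_regs.h below, extracts a 6-bit field starting at bit 14 of SGE_CONM_CTRL, and because the hardware expresses the threshold in units of 2 Free List pointers, the starvation threshold becomes 2*threshold + 1. A minimal sketch with an assumed register value:

```c
#include <stdio.h>
#include <stdint.h>

/* Field accessors as added to t4_regs.h in this series. */
#define EGRTHRESHOLDPACKING_MASK   0x3fU
#define EGRTHRESHOLDPACKING_SHIFT  14
#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
				    EGRTHRESHOLDPACKING_MASK)

int main(void)
{
	/* Assumed SGE_CONM_CTRL contents: packing threshold field = 18. */
	uint32_t sge_conm_ctrl = 18u << EGRTHRESHOLDPACKING_SHIFT;
	unsigned int egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);

	/* The threshold is in units of 2 Free List pointers; keep one
	 * extra entry so the Free List never runs dry at the threshold.
	 */
	printf("fl_starve_thres = %u\n", 2 * egress_threshold + 1);
	return 0;
}
```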
@@ -2596,6 +2596,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_sge_decode_idma_state - decode the idma state
* @adapter: the adapter
* @state: the state idma is stuck in
*/
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
static const char * const t4_decode[] = {
"IDMA_IDLE",
"IDMA_PUSH_MORE_CPL_FIFO",
"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
"Not used",
"IDMA_PHYSADDR_SEND_PCIEHDR",
"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
"IDMA_PHYSADDR_SEND_PAYLOAD",
"IDMA_SEND_FIFO_TO_IMSG",
"IDMA_FL_REQ_DATA_FL_PREP",
"IDMA_FL_REQ_DATA_FL",
"IDMA_FL_DROP",
"IDMA_FL_H_REQ_HEADER_FL",
"IDMA_FL_H_SEND_PCIEHDR",
"IDMA_FL_H_PUSH_CPL_FIFO",
"IDMA_FL_H_SEND_CPL",
"IDMA_FL_H_SEND_IP_HDR_FIRST",
"IDMA_FL_H_SEND_IP_HDR",
"IDMA_FL_H_REQ_NEXT_HEADER_FL",
"IDMA_FL_H_SEND_NEXT_PCIEHDR",
"IDMA_FL_H_SEND_IP_HDR_PADDING",
"IDMA_FL_D_SEND_PCIEHDR",
"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
"IDMA_FL_D_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_PCIEHDR",
"IDMA_FL_PUSH_CPL_FIFO",
"IDMA_FL_SEND_CPL",
"IDMA_FL_SEND_PAYLOAD_FIRST",
"IDMA_FL_SEND_PAYLOAD",
"IDMA_FL_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_NEXT_PCIEHDR",
"IDMA_FL_SEND_PADDING",
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
"IDMA_FL_SEND_FIFO_TO_IMSG",
"IDMA_FL_REQ_DATAFL_DONE",
"IDMA_FL_REQ_HEADERFL_DONE",
};
static const char * const t5_decode[] = {
"IDMA_IDLE",
"IDMA_ALMOST_IDLE",
"IDMA_PUSH_MORE_CPL_FIFO",
"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
"IDMA_PHYSADDR_SEND_PCIEHDR",
"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
"IDMA_PHYSADDR_SEND_PAYLOAD",
"IDMA_SEND_FIFO_TO_IMSG",
"IDMA_FL_REQ_DATA_FL",
"IDMA_FL_DROP",
"IDMA_FL_DROP_SEND_INC",
"IDMA_FL_H_REQ_HEADER_FL",
"IDMA_FL_H_SEND_PCIEHDR",
"IDMA_FL_H_PUSH_CPL_FIFO",
"IDMA_FL_H_SEND_CPL",
"IDMA_FL_H_SEND_IP_HDR_FIRST",
"IDMA_FL_H_SEND_IP_HDR",
"IDMA_FL_H_REQ_NEXT_HEADER_FL",
"IDMA_FL_H_SEND_NEXT_PCIEHDR",
"IDMA_FL_H_SEND_IP_HDR_PADDING",
"IDMA_FL_D_SEND_PCIEHDR",
"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
"IDMA_FL_D_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_PCIEHDR",
"IDMA_FL_PUSH_CPL_FIFO",
"IDMA_FL_SEND_CPL",
"IDMA_FL_SEND_PAYLOAD_FIRST",
"IDMA_FL_SEND_PAYLOAD",
"IDMA_FL_REQ_NEXT_DATA_FL",
"IDMA_FL_SEND_NEXT_PCIEHDR",
"IDMA_FL_SEND_PADDING",
"IDMA_FL_SEND_COMPLETION_TO_IMSG",
};
static const u32 sge_regs[] = {
SGE_DEBUG_DATA_LOW_INDEX_2,
SGE_DEBUG_DATA_LOW_INDEX_3,
SGE_DEBUG_DATA_HIGH_INDEX_10,
};
const char **sge_idma_decode;
int sge_idma_decode_nstates;
int i;
if (is_t4(adapter->params.chip)) {
sge_idma_decode = (const char **)t4_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
} else {
sge_idma_decode = (const char **)t5_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
}
if (state < sge_idma_decode_nstates)
CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
else
CH_WARN(adapter, "idma state %d unknown\n", state);
for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
CH_WARN(adapter, "SGE register %#x value %#x\n",
sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}
/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
@@ -230,6 +230,12 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
#define EGRTHRESHOLDPACKING_MASK 0x3fU
#define EGRTHRESHOLDPACKING_SHIFT 14
#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
EGRTHRESHOLDPACKING_MASK)
#define SGE_DBFIFO_STATUS 0x10a4
#define HP_INT_THRESH_SHIFT 28
#define HP_INT_THRESH_MASK 0xfU
@@ -278,6 +284,9 @@
#define SGE_DEBUG_INDEX 0x10cc
#define SGE_DEBUG_DATA_HIGH 0x10d0
#define SGE_DEBUG_DATA_LOW 0x10d4
#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
#define S_HP_INT_THRESH 28