Commit f18a37ff authored by David S. Miller

Merge branch 'qed-fixes'

Sudarsana Reddy Kalluru says:

====================
qed*: fix series.

The patch series contains several minor bug fixes for qed/qede modules.
Please consider applying this to the 'net' branch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 396a30cc fabd545c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
 	if (!dcbx_info)
 		return -ENOMEM;
+	memset(dcbx_info, 0, sizeof(*dcbx_info));
 	rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
 	if (rc) {
 		kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 	if (!dcbx_info)
 		return NULL;
+	memset(dcbx_info, 0, sizeof(*dcbx_info));
 	if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
 		kfree(dcbx_info);
 		return NULL;
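Both hunks above zero the freshly allocated dcbx_info before querying it, so stale heap contents cannot leak through fields the query leaves untouched. A minimal sketch of the equivalent single-call form, on the assumption that the allocation site currently uses kmalloc(); kzalloc() allocates and zeroes in one step:

	#include <linux/slab.h>

	/* kzalloc() == kmalloc() followed by memset(ptr, 0, size) */
	dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
	if (!dcbx_info)
		return -ENOMEM;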
...
@@ -878,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 		}
 	}
+	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
 	rc = qed_nic_setup(cdev);
 	if (rc)
 		goto err;
...
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
 int qede_txq_has_work(struct qede_tx_queue *txq);
 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 			     u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
 #define RX_RING_SIZE_POW	13
 #define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
 #define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
 #define NUM_RX_BDS_MIN		128
-#define NUM_RX_BDS_DEF		NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)
 #define TX_RING_SIZE_POW	13
 #define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
...
@@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev,
 	struct qede_dev *edev = netdev_priv(dev);
 
 	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+	channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+	channels->max_tx = QEDE_MAX_RSS_CNT(edev);
 	channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
 				   edev->fp_num_rx;
 	channels->tx_count = edev->fp_num_tx;
@@ -820,6 +822,13 @@ static int qede_set_channels(struct net_device *dev,
 	edev->req_queues = count;
 	edev->req_num_tx = channels->tx_count;
 	edev->req_num_rx = channels->rx_count;
+	/* Reset the indirection table if rx queue count is updated */
+	if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+		edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+		memset(&edev->rss_params.rss_ind_table, 0,
+		       sizeof(edev->rss_params.rss_ind_table));
+	}
+
 	if (netif_running(dev))
 		qede_reload(edev, NULL, NULL);
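The added block resets the RSS indirection table whenever the rx queue count changes: each table entry names an rx queue, so an entry written for the old count could point at a queue that no longer exists. A sketch of rebuilding such a table with the usual round-robin default; the standalone helper below and its parameters are hypothetical, while ethtool_rxfh_indir_default() is the real <linux/ethtool.h> helper (it returns index % n_rx_rings):

	#include <linux/ethtool.h>

	/* Refill an RSS indirection table so that every entry indexes
	 * an existing rx queue, spreading entries round-robin.
	 */
	static void rebuild_rss_indir(u32 *indir, u32 tbl_size,
				      u32 num_rx_queues)
	{
		u32 i;

		for (i = 0; i < tbl_size; i++)
			indir[i] = ethtool_rxfh_indir_default(i, num_rx_queues);
	}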
@@ -1053,6 +1062,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
 	struct qede_dev *edev = netdev_priv(dev);
 	int i;
 
+	if (edev->dev_info.common.num_hwfns > 1) {
+		DP_INFO(edev,
+			"RSS configuration is not supported for 100G devices\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
@@ -1184,7 +1199,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	}
 
 	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-		       BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 	txq->sw_tx_cons++;
 	txq->sw_tx_ring[idx].skb = NULL;
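The three dma_unmap_page() -> dma_unmap_single() changes in this series all fix the same DMA-API violation: these tx buffers are mapped with dma_map_single(), and the DMA API requires the unmap call to match the flavour of the map call (CONFIG_DMA_API_DEBUG warns on a mismatch). A minimal sketch of the matched pair; the function and its arguments are hypothetical:

	#include <linux/dma-mapping.h>

	static int xmit_one_buf(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t mapping;

		/* mapped with the _single flavour ... */
		mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mapping))
			return -ENOMEM;

		/* ... post 'mapping' to the hardware, wait for completion ... */

		/* ... so it must be unmapped with the _single flavour too */
		dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
		return 0;
	}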
@@ -1199,8 +1214,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 	struct qede_rx_queue *rxq = NULL;
 	struct sw_rx_data *sw_rx_data;
 	union eth_rx_cqe *cqe;
+	int i, rc = 0;
 	u8 *data_ptr;
-	int i;
 
 	for_each_queue(i) {
 		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,23 +1234,19 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 	 * queue and that the loopback traffic is not IP.
 	 */
 	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
-		if (qede_has_rx_work(rxq))
-			break;
-		usleep_range(100, 200);
-	}
 		if (!qede_has_rx_work(rxq)) {
-			DP_NOTICE(edev, "Failed to receive the traffic\n");
-			return -1;
+			usleep_range(100, 200);
+			continue;
 		}
 
 		hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
 		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
 
-		/* Memory barrier to prevent the CPU from doing speculative reads of CQE
-		 * / BD before reading hw_comp_cons. If the CQE is read before it is
-		 * written by FW, then FW writes CQE and SB, and then the CPU reads the
-		 * hw_comp_cons, it will use an old CQE.
+		/* Memory barrier to prevent the CPU from doing speculative
+		 * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+		 * read before it is written by FW, then FW writes CQE and SB,
+		 * and then the CPU reads the hw_comp_cons, it will use an old
+		 * CQE.
 		 */
 		rmb();
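The barrier comment reworded above captures a standard ring-consumer rule: the completion index must be read before any CQE it guards, and rmb() keeps the CPU from reordering or speculating the CQE load ahead of the index load. As a sketch only, not driver code (all names invented), here is a userspace analogue in C11 atomics, where an acquire load of the producer index gives the same ordering as the plain load plus rmb() pair:

	#include <stdatomic.h>
	#include <stdio.h>

	#define RING_SIZE 16

	static int ring[RING_SIZE];
	static _Atomic unsigned int prod_idx;

	static void consume(unsigned int *cons)
	{
		/* Acquire: ring entries published before the producer's
		 * release store to prod_idx are visible, and no ring[]
		 * read can be hoisted above this load.
		 */
		unsigned int prod = atomic_load_explicit(&prod_idx,
							 memory_order_acquire);

		while (*cons != prod) {
			printf("entry %d\n", ring[*cons % RING_SIZE]);
			(*cons)++;
		}
	}

	int main(void)
	{
		unsigned int cons = 0;

		ring[0] = 42;
		atomic_store_explicit(&prod_idx, 1, memory_order_release);
		consume(&cons);
		return 0;
	}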
@@ -1248,17 +1259,35 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 		fp_cqe = &cqe->fast_path_regular;
 		len = le16_to_cpu(fp_cqe->len_on_first_bd);
 		data_ptr = (u8 *)(page_address(sw_rx_data->data) +
-				  fp_cqe->placement_offset + sw_rx_data->page_offset);
+				  fp_cqe->placement_offset +
+				  sw_rx_data->page_offset);
+		if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+		    ether_addr_equal(data_ptr + ETH_ALEN,
+				     edev->ndev->dev_addr)) {
 			for (i = ETH_HLEN; i < len; i++)
 				if (data_ptr[i] != (unsigned char)(i & 0xff)) {
-					DP_NOTICE(edev, "Loopback test failed\n");
+					rc = -1;
+					break;
+				}
 			qede_recycle_rx_bd_ring(rxq, edev, 1);
-			return -1;
+			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+			break;
 		}
+
+		DP_INFO(edev, "Not the transmitted packet\n");
 		qede_recycle_rx_bd_ring(rxq, edev, 1);
+		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+	}
 
-	return 0;
+	if (i == QEDE_SELFTEST_POLL_COUNT) {
+		DP_NOTICE(edev, "Failed to receive the traffic\n");
+		return -1;
+	}
+
+	qede_update_rx_prod(edev, rxq);
+
+	return rc;
 }
...
@@ -313,7 +313,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
 		split_bd_len = BD_UNMAP_LEN(split);
 		bds_consumed++;
 	}
-	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
 
 	/* Unmap the data of the skb frags */
@@ -359,7 +359,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 		nbd--;
 	}
-	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
 
 	/* Unmap the data of the skb frags */
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	return 0;
 }
 
-static inline void qede_update_rx_prod(struct qede_dev *edev,
-				       struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
 	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
 	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
...
@@ -146,6 +146,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
 
 /* forward */
 struct qed_dev;
...