Commit f18a37ff authored by David S. Miller

Merge branch 'qed-fixes'

Sudarsana Reddy Kalluru says:

====================
qed*: fix series.

This series contains several minor bug fixes for the qed/qede modules.
Please consider applying it to the 'net' branch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 396a30cc fabd545c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
 	if (!dcbx_info)
 		return -ENOMEM;
 
+	memset(dcbx_info, 0, sizeof(*dcbx_info));
 	rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
 	if (rc) {
 		kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 	if (!dcbx_info)
 		return NULL;
 
+	memset(dcbx_info, 0, sizeof(*dcbx_info));
 	if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
 		kfree(dcbx_info);
 		return NULL;
......
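The two hunks above apply the same fix: dcbx_info comes back from the allocator and is then filled in by qed_dcbx_query_params(), which may leave some fields untouched, so the buffer is zeroed before use. A minimal sketch of the pattern, assuming the allocation is a plain kmalloc() (only the struct name and the query call come from the diff; the wrapper function is illustrative):

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: allocate, zero, then hand off, mirroring the fix. */
static struct qed_dcbx_get *alloc_dcbx_info(void)
{
	struct qed_dcbx_get *dcbx_info;

	dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
	if (!dcbx_info)
		return NULL;

	/* Zero before use so any field the query leaves untouched reads
	 * as 0 instead of stale heap contents.
	 */
	memset(dcbx_info, 0, sizeof(*dcbx_info));
	return dcbx_info;
}
```

kzalloc() would fold the allocation and the clearing into one call; the patch keeps the existing allocation and adds an explicit memset() instead.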
@@ -878,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 		}
 	}
 
+	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
 	rc = qed_nic_setup(cdev);
 	if (rc)
 		goto err;
......
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
 int qede_txq_has_work(struct qede_tx_queue *txq);
 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 			     u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
 
 #define RX_RING_SIZE_POW	13
 #define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
 #define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
 #define NUM_RX_BDS_MIN		128
-#define NUM_RX_BDS_DEF		NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)
 
 #define TX_RING_SIZE_POW	13
 #define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
......
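The define change above shrinks the default Rx descriptor count while leaving the maximum alone. A quick standalone check of the arithmetic (plain C sketch, with the kernel's u16 swapped for unsigned short; BIT(n) is 1 << n, as in the kernel):

```c
#include <stdio.h>

#define BIT(n)			(1U << (n))
#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((unsigned short)BIT(RX_RING_SIZE_POW))	/* 8192 */
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)			/* 8191 */
#define NUM_RX_BDS_DEF		((unsigned short)BIT(10) - 1)		/* 1023 */

int main(void)
{
	/* The default shrinks; the configurable maximum is unchanged. */
	printf("max=%u def=%u\n", NUM_RX_BDS_MAX, NUM_RX_BDS_DEF);
	return 0;
}
```

So the ring now defaults to 1023 Rx BDs instead of 8191, and it can still be raised up to NUM_RX_BDS_MAX at runtime.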
@@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev,
 	struct qede_dev *edev = netdev_priv(dev);
 
 	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+	channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+	channels->max_tx = QEDE_MAX_RSS_CNT(edev);
 	channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
 				   edev->fp_num_rx;
 	channels->tx_count = edev->fp_num_tx;
@@ -820,6 +822,13 @@ static int qede_set_channels(struct net_device *dev,
 	edev->req_queues = count;
 	edev->req_num_tx = channels->tx_count;
 	edev->req_num_rx = channels->rx_count;
+	/* Reset the indirection table if rx queue count is updated */
+	if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+		edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+		memset(&edev->rss_params.rss_ind_table, 0,
+		       sizeof(edev->rss_params.rss_ind_table));
+	}
+
 	if (netif_running(dev))
 		qede_reload(edev, NULL, NULL);
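The qede_set_channels() hunk clears QEDE_RSS_INDIR_INITED and zeroes the indirection table whenever the Rx queue count changes, since a stale table may point at queues that no longer exist. A sketch of how a default table is then typically rebuilt on reload; ethtool_rxfh_indir_default() is the stock in-kernel helper, while the table and parameter names here are illustrative:

```c
#include <linux/ethtool.h>

/* Illustrative rebuild of a default RSS indirection table after the Rx
 * queue count changed; each slot maps round-robin onto the live queues.
 */
static void rebuild_default_indir(u16 *ind_table, int table_size,
				  u32 num_rx_queues)
{
	int i;

	for (i = 0; i < table_size; i++)
		ind_table[i] = ethtool_rxfh_indir_default(i, num_rx_queues);
}
```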
@@ -1053,6 +1062,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
 	struct qede_dev *edev = netdev_priv(dev);
 	int i;
 
+	if (edev->dev_info.common.num_hwfns > 1) {
+		DP_INFO(edev,
+			"RSS configuration is not supported for 100G devices\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
@@ -1184,7 +1199,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	}
 
 	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 		       BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 	txq->sw_tx_cons++;
 	txq->sw_tx_ring[idx].skb = NULL;
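The one-line change above fixes a mismatched DMA unmap: the selftest frame's linear data was mapped with dma_map_single(), so it must be released with dma_unmap_single() rather than dma_unmap_page(). A minimal sketch of the pairing rule, with an illustrative transmit helper (device, buffer, and length names are assumptions):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative: a buffer mapped with dma_map_single() must be released
 * with dma_unmap_single(), never dma_unmap_page().
 */
static int xmit_one_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	/* ... hand 'mapping' to the hardware and wait for completion ... */

	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
	return 0;
}
```

CONFIG_DMA_API_DEBUG flags exactly this kind of map/unmap mismatch.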
@@ -1199,8 +1214,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 	struct qede_rx_queue *rxq = NULL;
 	struct sw_rx_data *sw_rx_data;
 	union eth_rx_cqe *cqe;
+	int i, rc = 0;
 	u8 *data_ptr;
-	int i;
 
 	for_each_queue(i) {
 		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,23 +1234,19 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 	 * queue and that the loopback traffic is not IP.
 	 */
 	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
-		if (qede_has_rx_work(rxq))
-			break;
-		usleep_range(100, 200);
-	}
-
-	if (!qede_has_rx_work(rxq)) {
-		DP_NOTICE(edev, "Failed to receive the traffic\n");
-		return -1;
+		if (!qede_has_rx_work(rxq)) {
+			usleep_range(100, 200);
+			continue;
 		}
 
 		hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
 		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
 
-	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
-	 * / BD before reading hw_comp_cons. If the CQE is read before it is
-	 * written by FW, then FW writes CQE and SB, and then the CPU reads the
-	 * hw_comp_cons, it will use an old CQE.
+		/* Memory barrier to prevent the CPU from doing speculative
+		 * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+		 * read before it is written by FW, then FW writes CQE and SB,
+		 * and then the CPU reads the hw_comp_cons, it will use an old
+		 * CQE.
 		 */
 		rmb();
 
@@ -1248,17 +1259,35 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
 		fp_cqe = &cqe->fast_path_regular;
 		len = le16_to_cpu(fp_cqe->len_on_first_bd);
 		data_ptr = (u8 *)(page_address(sw_rx_data->data) +
-			     fp_cqe->placement_offset + sw_rx_data->page_offset);
+				  fp_cqe->placement_offset +
+				  sw_rx_data->page_offset);
+		if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+		    ether_addr_equal(data_ptr + ETH_ALEN,
+				     edev->ndev->dev_addr)) {
 			for (i = ETH_HLEN; i < len; i++)
 				if (data_ptr[i] != (unsigned char)(i & 0xff)) {
-					DP_NOTICE(edev, "Loopback test failed\n");
-					qede_recycle_rx_bd_ring(rxq, edev, 1);
-					return -1;
+					rc = -1;
+					break;
 				}
 
 			qede_recycle_rx_bd_ring(rxq, edev, 1);
+			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+			break;
+		}
+
+		DP_INFO(edev, "Not the transmitted packet\n");
+		qede_recycle_rx_bd_ring(rxq, edev, 1);
+		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
 	}
 
-	return 0;
+	if (i == QEDE_SELFTEST_POLL_COUNT) {
+		DP_NOTICE(edev, "Failed to receive the traffic\n");
+		return -1;
+	}
+
+	qede_update_rx_prod(edev, rxq);
+
+	return rc;
 }
 
 static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
......
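The receive selftest is restructured around a single polling loop: frames that are not the transmitted packet are logged, recycled, and skipped, and only exhausting the poll budget counts as failure; qede_update_rx_prod() then republishes the producer values for the consumed buffers. A compact sketch of that control flow, with rx_work_pending() and poll_one_packet() as illustrative stand-ins for the driver's helpers:

```c
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define SELFTEST_POLL_COUNT 64		/* illustrative bound */

bool rx_work_pending(void);	/* stand-in for qede_has_rx_work() */
int poll_one_packet(void);	/* 0 = our frame, -EAGAIN = stray, -1 = bad payload */

/* Illustrative: retry inside one loop; a timeout is detected by the loop
 * index reaching the bound, matching the rewritten selftest above.
 */
int poll_for_loopback_frame(void)
{
	int i, rc = 0;

	for (i = 0; i < SELFTEST_POLL_COUNT; i++) {
		if (!rx_work_pending()) {
			usleep_range(100, 200);
			continue;
		}

		rc = poll_one_packet();
		if (rc != -EAGAIN)
			break;	/* matched (0) or payload mismatch (-1) */
	}

	if (i == SELFTEST_POLL_COUNT)
		return -1;	/* nothing matched within the poll budget */

	return rc;
}
```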
@@ -313,7 +313,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
 		split_bd_len = BD_UNMAP_LEN(split);
 		bds_consumed++;
 	}
-	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
 
 	/* Unmap the data of the skb frags */
@@ -359,7 +359,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 		nbd--;
 	}
-	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
 
 	/* Unmap the data of the skb frags */
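These two hunks repeat the dma_unmap_single() fix in the regular Tx completion paths: the first BD carries the skb's linear data, mapped with dma_map_single(), while the frags are mapped page-wise, so each needs its matching unmap call. A sketch of the asymmetry, assuming illustrative helper and parameter names (the frag addresses and lengths would come from the driver's own bookkeeping):

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative: linear skb data and paged frags use different map calls,
 * so their unmap calls must differ as well.
 */
static void unmap_tx_skb(struct device *dev, struct sk_buff *skb,
			 dma_addr_t head, const dma_addr_t *frags)
{
	int i;

	/* The head was mapped with dma_map_single(dev, skb->data, ...). */
	dma_unmap_single(dev, head, skb_headlen(skb), DMA_TO_DEVICE);

	/* The frags were mapped with skb_frag_dma_map(), i.e. as pages. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(dev, frags[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
}
```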
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	return 0;
 }
 
-static inline void qede_update_rx_prod(struct qede_dev *edev,
-				       struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
 	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
 	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
......
@@ -146,6 +146,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
 
 /* forward */
 struct qed_dev;
......
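This last hunk supplies the QED_DEFAULT_RX_USECS value (12 µs) that the qed_slowpath_start() hunk above assigns to cdev->rx_coalesce_usecs, so the device reports a sane Rx interrupt-coalescing default before the user configures one. A sketch of how such a default would surface through ethtool's get_coalesce callback; the callback body is illustrative, not the driver's exact code:

```c
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define QED_DEFAULT_RX_USECS 12	/* from the hunk above */

/* Illustrative: with the default in place, 'ethtool -c <dev>' would show
 * rx-usecs 12 until the user overrides it.
 */
static int sketch_get_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *coal)
{
	struct qede_dev *edev = netdev_priv(dev);

	coal->rx_coalesce_usecs = edev->cdev->rx_coalesce_usecs;
	return 0;
}
```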