Commit 53d56f79 authored by David S. Miller

Merge branch 'qed-next'

Manish Chopra says:

====================
qed/qede: Enhancements

This patch series adds support for the below features in qed/qede
(sample ethtool invocations are shown after the list):

1) Ntuple filter configuration [via ethtool -n/N]
2) EEE (energy efficient ethernet) support [ethtool --set-eee/show-eee]
3) Coalescing configuration support for VFs [via ethtool -c/C]
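
For reference, these map onto standard ethtool usage along the following
lines (the interface name and values here are purely illustrative):

  ethtool -N p1p1 flow-type tcp4 src-ip 192.16.6.10 dst-port 3333 action 1
  ethtool --set-eee p1p1 eee on tx-lpi on tx-timer 2560
  ethtool -C p1p1 rx-usecs 24 tx-usecs 48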

Please consider applying this to "net-next"

V1->V2:
* Fixes the below kbuild test robot warning:

drivers/net//ethernet/qlogic/qed/qed_l2.c:
In function 'qed_get_queue_coalesce':
drivers/net//ethernet/qlogic/qed/qed_l2.c:2137:8: error:
implicit declaration of function 'qed_vf_pf_get_coalesce'
[-Werror=implicit-function-declaration]
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e27a8792 41822878
@@ -1684,6 +1684,8 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
"Load request was sent. Load code: 0x%x\n",
load_code);
qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code ==
@@ -2472,6 +2474,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
struct qed_mcp_link_capabilities *p_caps;
struct qed_mcp_link_params *link;
/* Read global nvm_cfg address */
@@ -2534,6 +2537,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
p_caps = &p_hwfn->mcp_info->link_capabilities;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
link_temp = qed_rd(p_hwfn, p_ptt,
@@ -2588,10 +2592,45 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
link->loopback_mode = 0;
-DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-"Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
-link->speed.forced_speed, link->speed.advertised_speeds,
-link->speed.autoneg, link->pause.autoneg);
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
offsetof(struct nvm_cfg1_port, ext_phy));
link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
p_caps->default_eee = QED_MCP_EEE_ENABLED;
link->eee.enable = true;
switch (link_temp) {
case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
p_caps->default_eee = QED_MCP_EEE_DISABLED;
link->eee.enable = false;
break;
case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
break;
case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
p_caps->eee_lpi_timer =
EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
break;
case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
break;
}
link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
link->eee.tx_lpi_enable = link->eee.enable;
link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
} else {
p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
}
DP_VERBOSE(p_hwfn,
NETIF_MSG_LINK,
"Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
link->speed.forced_speed,
link->speed.advertised_speeds,
link->speed.autoneg,
link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -2751,6 +2790,27 @@ static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_hw_info_port_num_ah(p_hwfn, p_ptt);
}
static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_link_capabilities *p_caps;
u32 eee_status;
p_caps = &p_hwfn->mcp_info->link_capabilities;
if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED)
return;
p_caps->eee_speed_caps = 0;
eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, eee_status));
eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
EEE_SUPPORTED_SPEED_OFFSET;
if (eee_status & EEE_1G_SUPPORTED)
p_caps->eee_speed_caps |= QED_EEE_1G_ADV;
if (eee_status & EEE_10G_ADV)
p_caps->eee_speed_caps |= QED_EEE_10G_ADV;
}
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2767,6 +2827,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_hw_info_port_num(p_hwfn, p_ptt);
qed_mcp_get_capabilities(p_hwfn, p_ptt);
qed_hw_get_nvm_info(p_hwfn, p_ptt);
rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
@@ -2785,6 +2847,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->func_info.ovlan;
qed_mcp_cmd_port_init(p_hwfn, p_ptt);
qed_get_eee_caps(p_hwfn, p_ptt);
}
if (qed_mcp_is_init(p_hwfn)) {
@@ -3630,7 +3694,7 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
p_coal_timeset = p_eth_qzone;
-memset(p_coal_timeset, 0, eth_qzone_size);
memset(p_eth_qzone, 0, eth_qzone_size);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
@@ -3638,12 +3702,46 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return 0;
}
-int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-u16 coalesce, u16 qid, u16 sb_id)
int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle)
{
struct qed_queue_cid *p_cid = p_handle;
struct qed_hwfn *p_hwfn;
struct qed_ptt *p_ptt;
int rc = 0;
p_hwfn = p_cid->p_owner;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid);
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
if (rx_coal) {
rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc)
goto out;
p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
}
if (tx_coal) {
rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
if (rc)
goto out;
p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
}
out:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 coalesce, struct qed_queue_cid *p_cid)
{
struct ustorm_eth_queue_zone eth_qzone;
u8 timeset, timer_res;
-u16 fw_qid = 0;
u32 address;
int rc;
@@ -3660,32 +3758,29 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
timeset = (u8)(coalesce >> timer_res);
-rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid);
-if (rc)
-return rc;
-rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
p_cid->sb_igu_id, false);
if (rc)
goto out;
-address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
address = BAR0_MAP_REG_USDM_RAM +
USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
if (rc)
goto out;
-p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
return rc;
}
-int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-u16 coalesce, u16 qid, u16 sb_id)
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 coalesce, struct qed_queue_cid *p_cid)
{
struct xstorm_eth_queue_zone eth_qzone;
u8 timeset, timer_res;
-u16 fw_qid = 0;
u32 address;
int rc;
@@ -3702,22 +3797,16 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
timeset = (u8)(coalesce >> timer_res);
-rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid);
-if (rc)
-return rc;
-rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
p_cid->sb_igu_id, true);
if (rc)
goto out;
-address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
address = BAR0_MAP_REG_XSDM_RAM +
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
-if (rc)
-goto out;
-p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
return rc;
}
......
@@ -443,38 +443,35 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
- * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
- * The fact that we can configure coalescing to up to 511, but on varying
- * accuracy [the bigger the value the less accurate] up to a mistake of 3usec
- * for the highest values.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
- *
- * @return int
- */
-int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-u16 coalesce, u16 qid, u16 sb_id);
 * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
 *
 * @param p_hwfn
 * @param p_coal - store coalesce value read from the hardware.
 * @param p_handle
 *
 * @return int
 **/
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
/**
- * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
- * While the API allows setting coalescing per-qid, all tx queues sharing a
- * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
- * otherwise configuration would break.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
- *
- * @return int
- */
-int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-u16 coalesce, u16 qid, u16 sb_id);
 * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
 * Tx queue. The fact that we can configure coalescing to up to 511, but on
 * varying accuracy [the bigger the value the less accurate] up to a mistake
 * of 3usec for the highest values.
 * While the API allows setting coalescing per-qid, all queues sharing a SB
 * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
 * otherwise configuration would break.
 *
 * @param rx_coal - Rx Coalesce value in micro seconds.
 * @param tx_coal - TX Coalesce value in micro seconds.
 * @param p_handle
 *
 * @return int
 **/
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
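
The accuracy caveat in the comment above comes from the timeset quantization
used by qed_set_rxq_coalesce()/qed_set_txq_coalesce(): the requested
microsecond value is right-shifted by the selected timer resolution before
being programmed, so larger values lose precision. A minimal standalone C
sketch of that arithmetic follows; the range-to-resolution mapping in
pick_timer_res() is an assumption inferred from the 0-0x7f/0x80-0xff/
0x100-0x1ff ranges mentioned above (in the driver the resolution is
programmed via qed_int_set_timer_res()):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: pick a timer resolution for a requested value. */
static uint8_t pick_timer_res(uint16_t usecs)
{
	if (usecs <= 0x7f)
		return 0;	/* 1 usec steps */
	if (usecs <= 0xff)
		return 1;	/* 2 usec steps */
	return 2;		/* 4 usec steps, values up to 0x1ff */
}

int main(void)
{
	const uint16_t samples[] = { 24, 120, 250, 511 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint16_t req = samples[i];
		uint8_t res = pick_timer_res(req);
		/* Same quantization as timeset = (u8)(coalesce >> timer_res) */
		uint8_t timeset = (uint8_t)(req >> res);

		printf("requested %3u usec -> timeset 0x%02x -> effective %3u usec\n",
		       req, timeset, (unsigned int)timeset << res);
	}
	/* 511 comes back as 508: the 3 usec error noted in the comment. */
	return 0;
}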
@@ -10825,6 +10825,17 @@ struct eth_phy_cfg {
#define ETH_LOOPBACK_EXT (3)
#define ETH_LOOPBACK_MAC (4)
u32 eee_cfg;
#define EEE_CFG_EEE_ENABLED BIT(0)
#define EEE_CFG_TX_LPI BIT(1)
#define EEE_CFG_ADV_SPEED_1G BIT(2)
#define EEE_CFG_ADV_SPEED_10G BIT(3)
#define EEE_TX_TIMER_USEC_MASK (0xfffffff0)
#define EEE_TX_TIMER_USEC_OFFSET 4
#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00)
#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100)
#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000)
u32 feature_config_flags;
#define ETH_EEE_MODE_ADV_LPI (1 << 0)
};
@@ -11242,6 +11253,25 @@ struct public_port {
u32 wol_pkt_len;
u32 wol_pkt_details;
struct dcb_dscp_map dcb_dscp_map;
u32 eee_status;
#define EEE_ACTIVE_BIT BIT(0)
#define EEE_LD_ADV_STATUS_MASK 0x000000f0
#define EEE_LD_ADV_STATUS_OFFSET 4
#define EEE_1G_ADV BIT(1)
#define EEE_10G_ADV BIT(2)
#define EEE_LP_ADV_STATUS_MASK 0x00000f00
#define EEE_LP_ADV_STATUS_OFFSET 8
#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
#define EEE_SUPPORTED_SPEED_OFFSET 12
#define EEE_1G_SUPPORTED BIT(1)
#define EEE_10G_SUPPORTED BIT(2)
u32 eee_remote;
#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
#define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000
#define EEE_REMOTE_TW_RX_OFFSET 16
};
struct public_func {
@@ -11570,6 +11600,9 @@ struct public_drv_mb {
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
@@ -11653,6 +11686,10 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
@@ -11696,6 +11733,9 @@ struct public_drv_mb {
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
/* get MFW feature support response */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
u32 drv_pulse_mb;
@@ -11891,7 +11931,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
u32 phy_cfg;
u32 mgmt_traffic;
u32 ext_phy;
/* EEE power saving mode */
#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
u32 mba_cfg1;
u32 mba_cfg2;
u32 vf_cfg;
......
@@ -2047,6 +2047,106 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_queue_cid *p_cid, u16 *p_rx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
int rc;
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(uintptr_t)&sb_entry, 2, 0);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return -EINVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_rx_coal = (u16)(coalesce << timer_res);
return 0;
}
int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_queue_cid *p_cid, u16 *p_tx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
int rc;
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(uintptr_t)&sb_entry, 2, 0);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return -EINVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_tx_coal = (u16)(coalesce << timer_res);
return 0;
}
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
struct qed_queue_cid *p_cid = handle;
struct qed_ptt *p_ptt;
int rc = 0;
if (IS_VF(p_hwfn->cdev)) {
rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
if (rc)
DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
return rc;
}
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
if (p_cid->b_is_rx) {
rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc)
goto out;
} else {
rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc)
goto out;
}
out:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -2696,6 +2796,20 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
return rc;
}
static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
struct qed_queue_cid *p_cid = handle;
struct qed_hwfn *p_hwfn;
int rc;
p_hwfn = p_cid->p_owner;
rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
if (rc)
DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
return rc;
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
@@ -2739,6 +2853,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.tunn_config = &qed_tunn_configure,
.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
.configure_arfs_searcher = &qed_configure_arfs_searcher,
.get_coalesce = &qed_get_coalesce,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
......
@@ -400,4 +400,20 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u8 qed_mcast_bin_from_mac(u8 *mac);
-#endif /* _QED_L2_H */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 coalesce, struct qed_queue_cid *p_cid);
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 coalesce, struct qed_queue_cid *p_cid);
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_queue_cid *p_cid, u16 *p_hw_coal);
int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_queue_cid *p_cid, u16 *p_hw_coal);
#endif
@@ -954,9 +954,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_tunnel_info tunn_info;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
-#ifdef CONFIG_RFS_ACCEL
struct qed_ptt *p_ptt;
-#endif
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -972,7 +970,6 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err;
}
-#ifdef CONFIG_RFS_ACCEL
if (cdev->num_hwfns == 1) {
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (p_ptt) {
@@ -983,7 +980,6 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err;
}
}
-#endif
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -1091,12 +1087,10 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (IS_PF(cdev))
release_firmware(cdev->firmware);
-#ifdef CONFIG_RFS_ACCEL
if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
QED_LEADING_HWFN(cdev)->p_arfs_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_arfs_ptt);
-#endif
qed_iov_wq_stop(cdev, false);
@@ -1111,11 +1105,9 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
-#ifdef CONFIG_RFS_ACCEL
if (cdev->num_hwfns == 1)
qed_ptt_release(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->p_arfs_ptt);
-#endif
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
@@ -1305,6 +1297,10 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
}
}
if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
memcpy(&link_params->eee, &params->eee,
sizeof(link_params->eee));
rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
qed_ptt_release(hwfn, ptt);
@@ -1491,6 +1487,21 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
if_link->eee_supported = false;
} else {
if_link->eee_supported = true;
if_link->eee_active = link.eee_active;
if_link->sup_caps = link_caps.eee_speed_caps;
/* MFW clears adv_caps on eee disable; use configured value */
if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
params.eee.adv_caps;
if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
if_link->eee.enable = params.eee.enable;
if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
}
}
static void qed_get_current_link(struct qed_dev *cdev,
@@ -1557,36 +1568,10 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
return rc;
}
-static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
-{
-*rx_coal = cdev->rx_coalesce_usecs;
-*tx_coal = cdev->tx_coalesce_usecs;
-}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
-u16 qid, u16 sb_id)
void *handle)
{
-struct qed_hwfn *hwfn;
-struct qed_ptt *ptt;
-int hwfn_index;
-int status = 0;
-hwfn_index = qid % cdev->num_hwfns;
-hwfn = &cdev->hwfns[hwfn_index];
-ptt = qed_ptt_acquire(hwfn);
-if (!ptt)
-return -EAGAIN;
-status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
-qid / cdev->num_hwfns, sb_id);
-if (status)
-goto out;
-status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
-qid / cdev->num_hwfns, sb_id);
-out:
-qed_ptt_release(hwfn, ptt);
-return status;
return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
@@ -1735,7 +1720,6 @@ const struct qed_common_ops qed_common_ops_pass = {
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
.nvm_get_image = &qed_nvm_get_image,
-.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
.update_drv_state = &qed_update_drv_state,
......
@@ -1097,6 +1097,31 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_link_state *p_link)
{
u32 eee_status, val;
p_link->eee_adv_caps = 0;
p_link->eee_lp_adv_caps = 0;
eee_status = qed_rd(p_hwfn,
p_ptt,
p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, eee_status));
p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
if (val & EEE_1G_ADV)
p_link->eee_adv_caps |= QED_EEE_1G_ADV;
if (val & EEE_10G_ADV)
p_link->eee_adv_caps |= QED_EEE_10G_ADV;
val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
if (val & EEE_1G_ADV)
p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
if (val & EEE_10G_ADV)
p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_reset)
{
@@ -1228,6 +1253,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
qed_link_update(p_hwfn);
out:
spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
@@ -1251,6 +1279,19 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
if (params->eee.enable)
phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
if (params->eee.adv_caps & QED_EEE_1G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
if (params->eee.adv_caps & QED_EEE_10G_ADV)
phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
EEE_TX_TIMER_USEC_OFFSET) &
EEE_TX_TIMER_USEC_MASK;
}
p_hwfn->b_drv_link_init = b_up;
@@ -2822,3 +2863,28 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
p_unlock->resource = resource;
}
}
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
if (!rc)
DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
"MFW supported features: %08x\n",
p_hwfn->mcp_info->capabilities);
return rc;
}
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp, mcp_param, features;
features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
@@ -53,15 +53,25 @@ struct qed_mcp_link_pause_params {
bool forced_tx;
};
enum qed_mcp_eee_mode {
QED_MCP_EEE_DISABLED,
QED_MCP_EEE_ENABLED,
QED_MCP_EEE_UNSUPPORTED
};
struct qed_mcp_link_params {
struct qed_mcp_link_speed_params speed;
struct qed_mcp_link_pause_params pause;
u32 loopback_mode;
struct qed_link_eee_params eee;
};
struct qed_mcp_link_capabilities {
u32 speed_capabilities;
bool default_speed_autoneg;
enum qed_mcp_eee_mode default_eee;
u32 eee_lpi_timer;
u8 eee_speed_caps;
};
struct qed_mcp_link_state {
@@ -102,6 +112,9 @@ struct qed_mcp_link_state {
u8 partner_adv_pause;
bool sfp_tx_fault;
bool eee_active;
u8 eee_adv_caps;
u8 eee_lp_adv_caps;
};
struct qed_mcp_function_info {
@@ -546,6 +559,9 @@ struct qed_mcp_info {
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
u32 mcp_hist;
/* Capabilities negotiated with the MFW */
u32 capabilities;
};
struct qed_mcp_mb_params {
@@ -925,5 +941,20 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
enum qed_resc_lock
resource, bool b_is_permanent);
/**
* @brief Learn of supported MFW features; To be done during early init
*
* @param p_hwfn
* @param p_ptt
*/
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Inform MFW of set of features supported by driver. Should be done
* inside the content of the LOAD_REQ.
*
* @param p_hwfn
* @param p_ptt
*/
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#endif
@@ -3400,6 +3400,157 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
length, status);
}
static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf)
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_read_coal_resp_tlv *p_resp;
struct vfpf_read_coal_req_tlv *req;
u8 status = PFVF_STATUS_FAILURE;
struct qed_vf_queue *p_queue;
struct qed_queue_cid *p_cid;
u16 coal = 0, qid, i;
bool b_is_rx;
int rc = 0;
mbx->offset = (u8 *)mbx->reply_virt;
req = &mbx->req_virt->read_coal_req;
qid = req->qid;
b_is_rx = req->is_rx ? true : false;
if (b_is_rx) {
if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Invalid Rx queue_id = %d\n",
p_vf->abs_vf_id, qid);
goto send_resp;
}
p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
if (rc)
goto send_resp;
} else {
if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Invalid Tx queue_id = %d\n",
p_vf->abs_vf_id, qid);
goto send_resp;
}
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
p_queue = &p_vf->vf_queues[qid];
if ((!p_queue->cids[i].p_cid) ||
(!p_queue->cids[i].b_is_tx))
continue;
p_cid = p_queue->cids[i].p_cid;
rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
if (rc)
goto send_resp;
break;
}
}
status = PFVF_STATUS_SUCCESS;
send_resp:
p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
sizeof(*p_resp));
p_resp->coal = coal;
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_coalesce *req;
u8 status = PFVF_STATUS_FAILURE;
struct qed_queue_cid *p_cid;
u16 rx_coal, tx_coal;
int rc = 0, i;
u16 qid;
req = &mbx->req_virt->update_coalesce;
rx_coal = req->rx_coal;
tx_coal = req->tx_coal;
qid = req->qid;
if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Invalid Rx queue_id = %d\n",
vf->abs_vf_id, qid);
goto out;
}
if (!qed_iov_validate_txq(p_hwfn, vf, qid,
QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Invalid Tx queue_id = %d\n",
vf->abs_vf_id, qid);
goto out;
}
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Unable to set rx queue = %d coalesce\n",
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
goto out;
}
vf->rx_coal = rx_coal;
}
if (tx_coal) {
struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
if (!p_queue->cids[i].p_cid)
continue;
if (!p_queue->cids[i].b_is_tx)
continue;
rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
p_queue->cids[i].p_cid);
if (rc) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Unable to set tx queue coalesce\n",
vf->abs_vf_id);
goto out;
}
}
vf->tx_coal = tx_coal;
}
status = PFVF_STATUS_SUCCESS;
out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
sizeof(struct pfvf_def_resp_tlv), status);
}
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@@ -3725,6 +3876,12 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_UPDATE_TUNN_PARAM:
qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_COALESCE_UPDATE:
qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_COALESCE_READ:
qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
......
@@ -217,6 +217,9 @@ struct qed_vf_info {
u8 num_rxqs;
u8 num_txqs;
u16 rx_coal;
u16 tx_coal;
u8 num_sbs;
u8 num_mac_filters;
......
@@ -1343,6 +1343,81 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
return rc;
}
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_read_coal_resp_tlv *resp;
struct vfpf_read_coal_req_tlv *req;
int rc;
/* clear mailbox and prep header tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
req->qid = p_cid->rel.queue_id;
req->is_rx = p_cid->b_is_rx ? 1 : 0;
qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->read_coal_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
goto exit;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
goto exit;
*p_coal = resp->coal;
exit:
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_update_coalesce *req;
struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep header tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));
req->rx_coal = rx_coal;
req->tx_coal = tx_coal;
req->qid = p_cid->rel.queue_id;
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
rx_coal, tx_coal, req->qid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
goto exit;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
goto exit;
if (rx_coal)
p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
if (tx_coal)
p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
exit:
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
......
@@ -497,6 +497,27 @@ struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
struct vfpf_update_coalesce {
struct vfpf_first_tlv first_tlv;
u16 rx_coal;
u16 tx_coal;
u16 qid;
u8 padding[2];
};
struct vfpf_read_coal_req_tlv {
struct vfpf_first_tlv first_tlv;
u16 qid;
u8 is_rx;
u8 padding[5];
};
struct pfvf_read_coal_resp_tlv {
struct pfvf_tlv hdr;
u16 coal;
u8 padding[6];
};
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -509,7 +530,8 @@ union vfpf_tlvs {
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
-struct channel_list_end_tlv list_end;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
struct tlv_buffer_size tlv_buf_size;
};
@@ -519,6 +541,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
struct pfvf_read_coal_resp_tlv read_coal_resp;
};
enum qed_bulletin_bit {
@@ -624,8 +647,9 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
-CHANNEL_TLV_RESERVED,
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -677,6 +701,31 @@ struct qed_vf_iov {
bool b_doorbell_bar;
};
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
* Coalesce value '0' will omit the configuration.
*
* @param p_hwfn
* @param rx_coal - coalesce value in micro second for rx queue
* @param tx_coal - coalesce value in micro second for tx queue
* @param p_cid - queue cid
*
**/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal,
u16 tx_coal, struct qed_queue_cid *p_cid);
/**
* @brief VF - Get coalesce per VF's relative queue.
*
* @param p_hwfn
* @param p_coal - coalesce value in micro second for VF queues.
* @param p_cid - queue cid
*
**/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid);
#ifdef CONFIG_QED_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed
......
@@ -160,6 +160,8 @@ struct qede_rdma_dev {
struct qede_ptp;
#define QEDE_RFS_MAX_FLTR 256
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -241,9 +243,7 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
-#ifdef CONFIG_RFS_ACCEL
struct qede_arfs *arfs;
-#endif
bool wol_enabled;
struct qede_rdma_dev rdma_info;
@@ -447,16 +447,21 @@ struct qede_fastpath {
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#define QEDE_SP_ARFS_CONFIG 4
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-#define QEDE_SP_ARFS_CONFIG 4
-#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
-#define QEDE_RFS_MAX_FLTR 256
-#endif
int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);
struct qede_reload_args {
void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
......
@@ -702,24 +702,62 @@ static u32 qede_get_link(struct net_device *dev)
static int qede_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
void *rx_handle = NULL, *tx_handle = NULL;
struct qede_dev *edev = netdev_priv(dev);
-u16 rxc, txc;
u16 rx_coal, tx_coal, i, rc = 0;
struct qede_fastpath *fp;
rx_coal = QED_DEFAULT_RX_USECS;
tx_coal = QED_DEFAULT_TX_USECS;
memset(coal, 0, sizeof(struct ethtool_coalesce));
-edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
-coal->rx_coalesce_usecs = rxc;
-coal->tx_coalesce_usecs = txc;
-return 0;
__qede_lock(edev);
if (edev->state == QEDE_STATE_OPEN) {
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_RX) {
rx_handle = fp->rxq->handle;
break;
}
}
rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle);
if (rc) {
DP_INFO(edev, "Read Rx coalesce error\n");
goto out;
}
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
tx_handle = fp->txq->handle;
break;
}
}
rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle);
if (rc)
DP_INFO(edev, "Read Tx coalesce error\n");
}
out:
__qede_unlock(edev);
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
return rc;
}
static int qede_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
int i, rc = 0;
-u16 rxc, txc, sb_id;
u16 rxc, txc;
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
@@ -730,23 +768,38 @@ static int qede_set_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
DP_INFO(edev,
"Can't support requested %s coalesce value [max supported value %d]\n",
-coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
-: "tx",
-QED_COALESCE_MAX);
coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" :
"tx", QED_COALESCE_MAX);
return -EINVAL;
}
rxc = (u16)coal->rx_coalesce_usecs;
txc = (u16)coal->tx_coalesce_usecs;
for_each_queue(i) {
-sb_id = edev->fp_array[i].sb_info->igu_sb_id;
-rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
-(u16)i, sb_id);
fp = &edev->fp_array[i];
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
rc = edev->ops->common->set_coalesce(edev->cdev,
rxc, 0,
fp->rxq->handle);
if (rc) {
DP_INFO(edev,
"Set RX coalesce error, rc = %d\n", rc);
return rc;
}
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
rc = edev->ops->common->set_coalesce(edev->cdev,
0, txc,
fp->txq->handle);
if (rc) {
-DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
DP_INFO(edev,
"Set TX coalesce error, rc = %d\n", rc);
return rc;
}
}
}
return rc;
}
@@ -1045,20 +1098,34 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
}
static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
-u32 *rules __always_unused)
u32 *rule_locs)
{
struct qede_dev *edev = netdev_priv(dev);
int rc = 0;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = QEDE_RSS_COUNT(edev);
-return 0;
break;
case ETHTOOL_GRXFH:
-return qede_get_rss_flags(edev, info);
rc = qede_get_rss_flags(edev, info);
break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
break;
case ETHTOOL_GRXCLSRULE:
rc = qede_get_cls_rule_entry(edev, info);
break;
case ETHTOOL_GRXCLSRLALL:
rc = qede_get_cls_rule_all(edev, info, rule_locs);
break;
default:
DP_ERR(edev, "Command parameters not supported\n");
-return -EOPNOTSUPP;
rc = -EOPNOTSUPP;
}
return rc;
}
static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
@@ -1168,14 +1235,24 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
struct qede_dev *edev = netdev_priv(dev);
int rc;
switch (info->cmd) {
case ETHTOOL_SRXFH:
-return qede_set_rss_flags(edev, info);
rc = qede_set_rss_flags(edev, info);
break;
case ETHTOOL_SRXCLSRLINS:
rc = qede_add_cls_rule(edev, info);
break;
case ETHTOOL_SRXCLSRLDEL:
rc = qede_del_cls_rule(edev, info);
break;
default:
DP_INFO(edev, "Command parameters not supported\n");
-return -EOPNOTSUPP;
rc = -EOPNOTSUPP;
}
return rc;
}
static u32 qede_get_rxfh_indir_size(struct net_device *dev)
@@ -1607,6 +1684,87 @@ static int qede_get_tunable(struct net_device *dev,
return 0;
}
static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
if (!current_link.eee_supported) {
DP_INFO(edev, "EEE is not supported\n");
return -EOPNOTSUPP;
}
if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
edata->advertised = ADVERTISED_1000baseT_Full;
if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
edata->advertised |= ADVERTISED_10000baseT_Full;
if (current_link.sup_caps & QED_EEE_1G_ADV)
edata->supported = ADVERTISED_1000baseT_Full;
if (current_link.sup_caps & QED_EEE_10G_ADV)
edata->supported |= ADVERTISED_10000baseT_Full;
if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
edata->lp_advertised = ADVERTISED_1000baseT_Full;
if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
edata->lp_advertised |= ADVERTISED_10000baseT_Full;
edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
edata->eee_enabled = current_link.eee.enable;
edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable;
edata->eee_active = current_link.eee_active;
return 0;
}
static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
if (!edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
if (!current_link.eee_supported) {
DP_INFO(edev, "EEE is not supported\n");
return -EOPNOTSUPP;
}
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
ADVERTISED_10000baseT_Full)) ||
((edata->advertised & (ADVERTISED_1000baseT_Full |
ADVERTISED_10000baseT_Full)) !=
edata->advertised)) {
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Invalid advertised capabilities %d\n",
edata->advertised);
return -EINVAL;
}
if (edata->advertised & ADVERTISED_1000baseT_Full)
params.eee.adv_caps = QED_EEE_1G_ADV;
if (edata->advertised & ADVERTISED_10000baseT_Full)
params.eee.adv_caps |= QED_EEE_10G_ADV;
params.eee.enable = edata->eee_enabled;
params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
params.eee.tx_lpi_timer = edata->tx_lpi_timer;
params.link_up = true;
edev->ops->common->set_link(edev->cdev, &params);
return 0;
}
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -1640,6 +1798,9 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
.get_eee = qede_get_eee,
.set_eee = qede_set_eee,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
};
@@ -1650,6 +1811,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.get_link = qede_get_link,
.get_coalesce = qede_get_coalesce,
.set_coalesce = qede_set_coalesce,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_strings = qede_get_strings,
......
@@ -38,7 +38,6 @@
#include <linux/qed/qed_if.h>
#include "qede.h"
#ifdef CONFIG_RFS_ACCEL
struct qede_arfs_tuple {
union {
__be32 src_ipv4;
...@@ -76,10 +75,12 @@ struct qede_arfs_fltr_node {
u16 next_rxq_id;
bool filter_op;
bool used;
u8 fw_rc;
struct hlist_node node;
};
struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT 100
#define QEDE_RFS_FLW_BITSHIFT (4)
#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
...@@ -121,11 +122,56 @@ qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
kfree(fltr);
}
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
struct qede_arfs_fltr_node *fltr,
u16 bucket_idx)
{
fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
fltr->buf_len, DMA_TO_DEVICE);
if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
qede_free_arfs_filter(edev, fltr);
return -ENOMEM;
}
INIT_HLIST_NODE(&fltr->node);
hlist_add_head(&fltr->node,
QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
edev->arfs->filter_count++;
if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
edev->ops->configure_arfs_searcher(edev->cdev, true);
edev->arfs->enable = true;
}
return 0;
}
static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
struct qede_arfs_fltr_node *fltr)
{
hlist_del(&fltr->node);
dma_unmap_single(&edev->pdev->dev, fltr->mapping,
fltr->buf_len, DMA_TO_DEVICE);
qede_free_arfs_filter(edev, fltr);
edev->arfs->filter_count--;
if (!edev->arfs->filter_count && edev->arfs->enable) {
edev->arfs->enable = false;
edev->ops->configure_arfs_searcher(edev->cdev, false);
}
}
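The pair of helpers above keeps the hardware aRFS searcher in lockstep with the filter count: the searcher is switched on only on the 0-to-1 transition and off only on the 1-to-0 transition. A standalone toy sketch of that pattern, where searcher_on()/searcher_off() are hypothetical stand-ins for edev->ops->configure_arfs_searcher(cdev, true/false):

#include <stdbool.h>
#include <stdio.h>

static int filter_count;
static bool searcher_enabled;

static void searcher_on(void)  { puts("searcher: on"); }
static void searcher_off(void) { puts("searcher: off"); }

static void filter_added(void)
{
	filter_count++;
	if (filter_count == 1 && !searcher_enabled) {	/* 0 -> 1 */
		searcher_on();
		searcher_enabled = true;
	}
}

static void filter_removed(void)
{
	filter_count--;
	if (!filter_count && searcher_enabled) {	/* 1 -> 0 */
		searcher_enabled = false;
		searcher_off();
	}
}

int main(void)
{
	filter_added();		/* searcher turns on */
	filter_added();		/* no transition */
	filter_removed();	/* no transition */
	filter_removed();	/* searcher turns off */
	return 0;
}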
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
struct qede_arfs_fltr_node *fltr = filter;
struct qede_dev *edev = dev;
fltr->fw_rc = fw_rc;
if (fw_rc) {
DP_NOTICE(edev,
"Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
...@@ -185,18 +231,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
!fltr->used) || free_fltr) {
qede_dequeue_fltr_and_config_searcher(edev, fltr);
} else {
bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
flow_exp = rps_may_expire_flow(edev->ndev,
fltr->rxq_id,
fltr->flow_id,
fltr->sw_id);
#endif
if ((flow_exp || del) && !free_fltr)
qede_configure_arfs_fltr(edev, fltr,
fltr->rxq_id,
false);
...@@ -213,10 +258,12 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
edev->arfs->enable = false;
edev->ops->configure_arfs_searcher(edev->cdev, false);
}
#ifdef CONFIG_RFS_ACCEL
} else {
set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task,
QEDE_SP_TASK_POLL_DELAY);
#endif
}
spin_unlock_bh(&edev->arfs->arfs_list_lock);
...@@ -258,25 +305,26 @@ int qede_alloc_arfs(struct qede_dev *edev)
spin_lock_init(&edev->arfs->arfs_list_lock);
for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));
edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
sizeof(long));
if (!edev->arfs->arfs_fltr_bmap) {
vfree(edev->arfs);
edev->arfs = NULL;
return -ENOMEM;
}
#ifdef CONFIG_RFS_ACCEL
edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
if (!edev->ndev->rx_cpu_rmap) {
vfree(edev->arfs->arfs_fltr_bmap);
edev->arfs->arfs_fltr_bmap = NULL;
vfree(edev->arfs);
edev->arfs = NULL;
return -ENOMEM;
}
#endif
return 0;
}
...@@ -285,16 +333,19 @@ void qede_free_arfs(struct qede_dev *edev)
if (!edev->arfs)
return;
#ifdef CONFIG_RFS_ACCEL
if (edev->ndev->rx_cpu_rmap)
free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
edev->ndev->rx_cpu_rmap = NULL;
#endif
vfree(edev->arfs->arfs_fltr_bmap);
edev->arfs->arfs_fltr_bmap = NULL;
vfree(edev->arfs);
edev->arfs = NULL;
}
#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
const struct sk_buff *skb)
{
...@@ -394,9 +445,8 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_lock_bh(&edev->arfs->arfs_list_lock);
n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
skb, ports[0], ports[1], ip_proto);
if (n) {
/* Filter match */
n->next_rxq_id = rxq_index;
...@@ -448,23 +498,9 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
n->tuple.ip_proto = ip_proto;
memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
if (rc)
goto ret_unlock;
qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
...@@ -472,6 +508,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
return n->sw_id;
ret_unlock:
...@@ -1263,3 +1300,371 @@ void qede_config_rx_mode(struct net_device *ndev)
out:
kfree(uc_macs);
}
static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
{
struct qede_arfs_fltr_node *fltr;
hlist_for_each_entry(fltr, head, node)
if (location == fltr->sw_id)
return fltr;
return NULL;
}
static bool
qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
struct ethtool_rx_flow_spec *fsp,
__be16 proto)
{
if (proto == htons(ETH_P_IP)) {
struct ethtool_tcpip4_spec *ip;
ip = &fsp->h_u.tcp_ip4_spec;
if (tpos->tuple.src_ipv4 == ip->ip4src &&
tpos->tuple.dst_ipv4 == ip->ip4dst)
return true;
else
return false;
} else {
struct ethtool_tcpip6_spec *ip6;
struct in6_addr *src;
ip6 = &fsp->h_u.tcp_ip6_spec;
src = &tpos->tuple.src_ipv6;
if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
!memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
sizeof(struct in6_addr)))
return true;
else
return false;
}
return false;
}
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct qede_arfs_fltr_node *fltr;
struct hlist_head *head;
int cnt = 0, rc = 0;
info->data = QEDE_RFS_MAX_FLTR;
__qede_lock(edev);
if (!edev->arfs) {
rc = -EPERM;
goto unlock;
}
head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
hlist_for_each_entry(fltr, head, node) {
if (cnt == info->rule_cnt) {
rc = -EMSGSIZE;
goto unlock;
}
rule_locs[cnt] = fltr->sw_id;
cnt++;
}
info->rule_cnt = cnt;
unlock:
__qede_unlock(edev);
return rc;
}
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct qede_arfs_fltr_node *fltr = NULL;
int rc = 0;
cmd->data = QEDE_RFS_MAX_FLTR;
__qede_lock(edev);
if (!edev->arfs) {
rc = -EPERM;
goto unlock;
}
fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
fsp->location);
if (!fltr) {
DP_NOTICE(edev, "Rule not found - location=0x%x\n",
fsp->location);
rc = -EINVAL;
goto unlock;
}
if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
if (fltr->tuple.ip_proto == IPPROTO_TCP)
fsp->flow_type = TCP_V4_FLOW;
else
fsp->flow_type = UDP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
} else {
if (fltr->tuple.ip_proto == IPPROTO_TCP)
fsp->flow_type = TCP_V6_FLOW;
else
fsp->flow_type = UDP_V6_FLOW;
fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
&fltr->tuple.src_ipv6, sizeof(struct in6_addr));
memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
&fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
}
fsp->ring_cookie = fltr->rxq_id;
unlock:
__qede_unlock(edev);
return rc;
}
static int
qede_validate_and_check_flow_exist(struct qede_dev *edev,
struct ethtool_rx_flow_spec *fsp,
int *min_hlen)
{
__be16 src_port = 0x0, dst_port = 0x0;
struct qede_arfs_fltr_node *fltr;
struct hlist_node *temp;
struct hlist_head *head;
__be16 eth_proto;
u8 ip_proto;
if (fsp->location >= QEDE_RFS_MAX_FLTR ||
fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
return -EINVAL;
if (fsp->flow_type == TCP_V4_FLOW) {
*min_hlen += sizeof(struct iphdr) +
sizeof(struct tcphdr);
eth_proto = htons(ETH_P_IP);
ip_proto = IPPROTO_TCP;
} else if (fsp->flow_type == UDP_V4_FLOW) {
*min_hlen += sizeof(struct iphdr) +
sizeof(struct udphdr);
eth_proto = htons(ETH_P_IP);
ip_proto = IPPROTO_UDP;
} else if (fsp->flow_type == TCP_V6_FLOW) {
*min_hlen += sizeof(struct ipv6hdr) +
sizeof(struct tcphdr);
eth_proto = htons(ETH_P_IPV6);
ip_proto = IPPROTO_TCP;
} else if (fsp->flow_type == UDP_V6_FLOW) {
*min_hlen += sizeof(struct ipv6hdr) +
sizeof(struct udphdr);
eth_proto = htons(ETH_P_IPV6);
ip_proto = IPPROTO_UDP;
} else {
DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
fsp->flow_type);
return -EPROTONOSUPPORT;
}
if (eth_proto == htons(ETH_P_IP)) {
src_port = fsp->h_u.tcp_ip4_spec.psrc;
dst_port = fsp->h_u.tcp_ip4_spec.pdst;
} else {
src_port = fsp->h_u.tcp_ip6_spec.psrc;
dst_port = fsp->h_u.tcp_ip6_spec.pdst;
}
head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
hlist_for_each_entry_safe(fltr, temp, head, node) {
if ((fltr->tuple.ip_proto == ip_proto &&
fltr->tuple.eth_proto == eth_proto &&
qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
fltr->tuple.src_port == src_port &&
fltr->tuple.dst_port == dst_port) ||
fltr->sw_id == fsp->location)
return -EEXIST;
}
return 0;
}
static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
struct qede_arfs_fltr_node *fltr)
{
int count = QEDE_ARFS_POLL_COUNT;
while (fltr->used && count) {
msleep(20);
count--;
}
if (count == 0 || fltr->fw_rc) {
qede_dequeue_fltr_and_config_searcher(edev, fltr);
return -EIO;
}
return fltr->fw_rc;
}
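qede_poll_arfs_filter_config() sleeps 20 ms per iteration for up to QEDE_ARFS_POLL_COUNT (100) iterations, so an unanswered firmware request times out after roughly 2 seconds and the filter is torn down. A self-contained sketch of the same bounded-poll idiom, with check_done() as a hypothetical stand-in for the !fltr->used test:

#include <stdbool.h>
#include <unistd.h>

#define POLL_COUNT 100		/* mirrors QEDE_ARFS_POLL_COUNT */
#define POLL_INTERVAL_MS 20	/* mirrors the msleep(20) above */

static volatile bool done;	/* set by a completion callback elsewhere */

static bool check_done(void)
{
	return done;		/* hypothetical stand-in for !fltr->used */
}

/* Returns 0 on completion, -1 after ~POLL_COUNT * 20 ms (~2 s). */
static int poll_for_completion(void)
{
	int count = POLL_COUNT;

	while (!check_done() && count) {
		usleep(POLL_INTERVAL_MS * 1000);
		count--;
	}
	return check_done() ? 0 : -1;
}

int main(void)
{
	done = true;	/* pretend the completion fired immediately */
	return poll_for_completion();
}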
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *n;
int min_hlen = ETH_HLEN, rc;
struct ethhdr *eth;
struct iphdr *ip;
__be16 *ports;
__qede_lock(edev);
if (!edev->arfs) {
rc = -EPERM;
goto unlock;
}
rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
if (rc)
goto unlock;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
rc = -ENOMEM;
goto unlock;
}
n->data = kzalloc(min_hlen, GFP_KERNEL);
if (!n->data) {
kfree(n);
rc = -ENOMEM;
goto unlock;
}
n->sw_id = fsp->location;
set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
n->buf_len = min_hlen;
n->rxq_id = fsp->ring_cookie;
n->next_rxq_id = n->rxq_id;
eth = (struct ethhdr *)n->data;
if (info->fs.flow_type == TCP_V4_FLOW ||
info->fs.flow_type == UDP_V4_FLOW) {
ports = (__be16 *)(n->data + ETH_HLEN +
sizeof(struct iphdr));
eth->h_proto = htons(ETH_P_IP);
n->tuple.eth_proto = htons(ETH_P_IP);
n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
ports[0] = n->tuple.src_port;
ports[1] = n->tuple.dst_port;
ip = (struct iphdr *)(n->data + ETH_HLEN);
ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
ip->version = 0x4;
ip->ihl = 0x5;
if (info->fs.flow_type == TCP_V4_FLOW) {
n->tuple.ip_proto = IPPROTO_TCP;
ip->protocol = IPPROTO_TCP;
} else {
n->tuple.ip_proto = IPPROTO_UDP;
ip->protocol = IPPROTO_UDP;
}
ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
} else {
struct ipv6hdr *ip6;
ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
ports = (__be16 *)(n->data + ETH_HLEN +
sizeof(struct ipv6hdr));
eth->h_proto = htons(ETH_P_IPV6);
n->tuple.eth_proto = htons(ETH_P_IPV6);
memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
sizeof(struct in6_addr));
n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
ports[0] = n->tuple.src_port;
ports[1] = n->tuple.dst_port;
memcpy(&ip6->saddr, &n->tuple.src_ipv6,
sizeof(struct in6_addr));
memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
sizeof(struct in6_addr));
ip6->version = 0x6;
if (info->fs.flow_type == TCP_V6_FLOW) {
n->tuple.ip_proto = IPPROTO_TCP;
ip6->nexthdr = NEXTHDR_TCP;
ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
} else {
n->tuple.ip_proto = IPPROTO_UDP;
ip6->nexthdr = NEXTHDR_UDP;
ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
}
}
rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
if (rc)
goto unlock;
qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
rc = qede_poll_arfs_filter_config(edev, n);
unlock:
__qede_unlock(edev);
return rc;
}
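qede_add_cls_rule() is the driver backend of ethtool -N. A userspace sketch of the equivalent raw ioctl, assuming a hypothetical interface "eth0", a TCP/IPv4 4-tuple, RX queue 1 and rule slot 0; ring_cookie and location must pass the bounds checks in qede_validate_and_check_flow_exist() above. Roughly equivalent to: ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-ip 192.168.1.2 src-port 4000 dst-port 5000 action 1 loc 0.

#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4src = inet_addr("192.168.1.1");
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.2");
	nfc.fs.h_u.tcp_ip4_spec.psrc = htons(4000);
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5000);
	nfc.fs.ring_cookie = 1;	/* RX queue; checked against QEDE_RSS_COUNT() */
	nfc.fs.location = 0;	/* sw_id; checked against QEDE_RFS_MAX_FLTR */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("ETHTOOL_SRXCLSRLINS");
	close(fd);
	return 0;
}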
int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *fltr = NULL;
int rc = -EPERM;
__qede_lock(edev);
if (!edev->arfs)
goto unlock;
fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
fsp->location);
if (!fltr)
goto unlock;
qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);
rc = qede_poll_arfs_filter_config(edev, fltr);
if (rc == 0)
qede_dequeue_fltr_and_config_searcher(edev, fltr);
unlock:
__qede_unlock(edev);
return rc;
}
int qede_get_arfs_filter_count(struct qede_dev *edev)
{
int count = 0;
__qede_lock(edev);
if (!edev->arfs)
goto unlock;
count = edev->arfs->filter_count;
unlock:
__qede_unlock(edev);
return count;
}
...@@ -873,9 +873,7 @@ static void qede_update_pf_params(struct qed_dev *cdev)
*/
pf_params.eth_pf_params.num_vf_cons = 48;
#ifdef CONFIG_RFS_ACCEL
pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
#endif
qed_ops->common->update_pf_params(cdev, &pf_params);
}
...@@ -1984,12 +1982,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
#ifdef CONFIG_RFS_ACCEL
if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
qede_poll_for_freeing_arfs_filters(edev);
qede_free_arfs(edev);
}
#endif
/* Release the interrupts */
qede_sync_free_irqs(edev);
edev->ops->common->set_fp_int(edev->cdev, 0);
...@@ -2041,13 +2039,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
#ifdef CONFIG_RFS_ACCEL
if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
rc = qede_alloc_arfs(edev);
if (rc)
DP_NOTICE(edev, "aRFS memory allocation failed\n");
}
#endif
qede_napi_add_enable(edev);
DP_INFO(edev, "Napi added and enabled\n");
...
...@@ -323,6 +323,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
bool en_searcher);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
...
...@@ -161,6 +161,18 @@ enum qed_nvm_images {
QED_NVM_IMAGE_FCOE_CFG,
};
struct qed_link_eee_params {
u32 tx_lpi_timer;
#define QED_EEE_1G_ADV BIT(0)
#define QED_EEE_10G_ADV BIT(1)
/* Capabilities are represented using QED_EEE_*_ADV values */
u8 adv_caps;
u8 lp_adv_caps;
bool enable;
bool tx_lpi_enable;
};
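qede's EEE callbacks translate between this adv_caps/lp_adv_caps encoding and the ethtool ADVERTISED_* mask. A standalone sketch of that two-way mapping; the ADVERTISED_* bit values are copied here from linux/ethtool.h so the snippet compiles without kernel headers:

#include <stdint.h>
#include <stdio.h>

#define QED_EEE_1G_ADV			(1u << 0)
#define QED_EEE_10G_ADV			(1u << 1)
#define ADVERTISED_1000baseT_Full	(1u << 5)	/* 0x0020, from linux/ethtool.h */
#define ADVERTISED_10000baseT_Full	(1u << 12)	/* 0x1000, from linux/ethtool.h */

/* qed caps -> ethtool mask, as in qede_get_eee() */
static uint32_t qed_caps_to_ethtool(uint8_t caps)
{
	uint32_t adv = 0;

	if (caps & QED_EEE_1G_ADV)
		adv |= ADVERTISED_1000baseT_Full;
	if (caps & QED_EEE_10G_ADV)
		adv |= ADVERTISED_10000baseT_Full;
	return adv;
}

/* ethtool mask -> qed caps, as in qede_set_eee() */
static uint8_t ethtool_to_qed_caps(uint32_t adv)
{
	uint8_t caps = 0;

	if (adv & ADVERTISED_1000baseT_Full)
		caps |= QED_EEE_1G_ADV;
	if (adv & ADVERTISED_10000baseT_Full)
		caps |= QED_EEE_10G_ADV;
	return caps;
}

int main(void)
{
	printf("0x%x\n", qed_caps_to_ethtool(QED_EEE_1G_ADV | QED_EEE_10G_ADV));
	return ethtool_to_qed_caps(ADVERTISED_1000baseT_Full) != QED_EEE_1G_ADV;
}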
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
...@@ -172,8 +184,9 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48
/* forward */
struct qed_dev;
...@@ -408,6 +421,7 @@ struct qed_link_params {
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
...@@ -422,6 +436,7 @@ struct qed_link_params {
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
u32 loopback_mode;
struct qed_link_eee_params eee;
};
struct qed_link_output {
...@@ -437,6 +452,12 @@ struct qed_link_output {
u8 port; /* In PORT defs */
bool autoneg;
u32 pause_config;
/* EEE - capability & param */
bool eee_supported;
bool eee_active;
u8 sup_caps;
struct qed_link_eee_params eee;
};
struct qed_probe_params {
...@@ -653,16 +674,6 @@ struct qed_common_ops {
int (*nvm_get_image)(struct qed_dev *cdev,
enum qed_nvm_images type, u8 *buf, u16 len);
/**
* @brief get_coalesce - Get coalesce parameters in usec
*
* @param cdev
* @param rx_coal - Rx coalesce value in usec
* @param tx_coal - Tx coalesce value in usec
*
*/
void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
/**
* @brief set_coalesce - Configure Rx coalesce value in usec
*
...@@ -674,8 +685,8 @@ struct qed_common_ops {
*
* @return 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev,
u16 rx_coal, u16 tx_coal, void *handle);
/**
* @brief set_led - Configure LED mode
...
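The reworked get/set coalesce ops above take an opaque per-queue handle in place of the old qid/sb_id pair; from userspace they are still driven by the ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE ioctls (ethtool -c/-C). A minimal sketch reading the current values, with "eth0" again an assumed interface name:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&ec;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-usecs=%u tx-usecs=%u\n",
		       ec.rx_coalesce_usecs, ec.tx_coalesce_usecs);
	close(fd);
	return 0;
}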