Commit 59ccf86f authored by Sudarsana Reddy Kalluru, committed by David S. Miller

qed: Add driver infrastructure for handling mfw requests.

The MFW requests the TLVs in interrupt context. Extracting the required
data from the upper layers and populating the TLVs require process context.
This patch adds a per-hwfn workqueue for processing the TLV requests, and
adds the implementation for requesting the TLV values from the appropriate
protocol driver.
Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 77a509e4
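For context on how the new callbacks are meant to be consumed, below is a minimal,
hypothetical sketch of an upper-layer (qede-style) implementation of the
get_generic_tlv_data() callback that this patch adds to struct qed_common_cb_ops.
The qede_* names, the edev->ndev back-pointer and the partial ops initializer are
illustrative assumptions, not part of this commit; only the struct qed_generic_tlvs
layout and the QED_TLV_* flags come from the patch itself.

/* Hypothetical consumer-side sketch (not part of this patch). */
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;		/* assumed L2 driver private struct */
	struct net_device *ndev = edev->ndev;	/* assumed netdev back-pointer */

	/* Advertise offload capabilities through the QED_TLV_* feature flags. */
	if (ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	/* Report the primary MAC; unused slots remain zeroed and are skipped
	 * by the qed core as invalid addresses.
	 */
	ether_addr_copy(data->mac[0], ndev->dev_addr);
}

static struct qed_common_cb_ops qede_tlv_cb_ops = {
	.get_generic_tlv_data = qede_get_generic_tlv_data,
	/* .get_protocol_tlv_data fills the protocol-specific TLV union member */
};

On the qed side, qed_fill_generic_tlv_data() (added in qed_main.c below) consumes this
structure: it copies only MAC entries that pass is_valid_ether_addr() and merges the
feature flags with the vport statistics before the TLVs are returned to the MFW.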
@@ -515,6 +515,10 @@ struct qed_simd_fp_handler {
	void (*func)(void *);
};

enum qed_slowpath_wq_flag {
	QED_SLOWPATH_MFW_TLV_REQ,
};

struct qed_hwfn {
	struct qed_dev *cdev;
	u8 my_id;		/* ID inside the PF */
@@ -644,6 +648,9 @@ struct qed_hwfn {
#endif
	struct z_stream_s *stream;

	struct workqueue_struct *slowpath_wq;
	struct delayed_work slowpath_task;
	unsigned long slowpath_task_flags;
};

struct pci_params {
@@ -908,6 +915,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
			  enum qed_mfw_tlv_type type,
...
@@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
	}
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	qed_ptt_release(hwfn, ptt);
}
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
	}

	return 0;
}
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
@@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
@@ -1095,6 +1160,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
@@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
@@ -2089,8 +2158,88 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
	}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}
@@ -1622,6 +1622,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
...
@@ -751,6 +751,14 @@ struct qed_int_info {
	u8 used_cnt;
};

struct qed_generic_tlvs {
#define QED_TLV_IP_CSUM	BIT(0)
#define QED_TLV_LSO	BIT(1)
	u16 feat_flags;
#define QED_TLV_MAC_COUNT	3
	u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};

#define QED_NVM_SIGNATURE 0x12435687

enum qed_nvm_flash_cmd {
@@ -765,6 +773,8 @@ struct qed_common_cb_ops {
	void (*link_update)(void *dev,
			    struct qed_link_output *link);
	void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
	void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
	void (*get_protocol_tlv_data)(void *dev, void *data);
};

struct qed_selftest_ops {
...