Commit 919f274f authored by David S. Miller's avatar David S. Miller

Merge branch 'qed-iov-fw-reqs'

Yuval Mintz says:

====================
qed: IOV series - relax firmware requirements

In order for VFs to work, current implementation demands that the VF's
required storm firmware would be exactly the version that was loaded by
the PF, which is a very harsh requirement.
This patch series is intended to relax this -
the recently submitted firmware is intended to be forward/backward
compatible in its fastpath [slowpath is configured by PF on behalf of VF],
and so VFs would only be required of having the same major fastpath HSI in
order to work.

Most of the other patches in this series extend current forward
compatibility of driver to reduce chance of breaking PF/VF compatibility
in the future. A few are unrelated IOV changes.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 3bcb846c 54fdd80f
This diff is collapsed.
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
#define _QED_SRIOV_H #define _QED_SRIOV_H
#include <linux/types.h> #include <linux/types.h>
#include "qed_vf.h" #include "qed_vf.h"
#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3) #define QED_VF_ARRAY_LENGTH (3)
#define IS_VF(cdev) ((cdev)->b_is_vf) #define IS_VF(cdev) ((cdev)->b_is_vf)
...@@ -22,7 +25,6 @@ ...@@ -22,7 +25,6 @@
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16 #define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ #define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
...@@ -118,6 +120,8 @@ struct qed_vf_shadow_config { ...@@ -118,6 +120,8 @@ struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */ /* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
/* Shadow copy of all configured MACs; Empty if forcing MACs */
u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
u8 inner_vlan_removal; u8 inner_vlan_removal;
}; };
...@@ -131,6 +135,9 @@ struct qed_vf_info { ...@@ -131,6 +135,9 @@ struct qed_vf_info {
struct qed_bulletin bulletin; struct qed_bulletin bulletin;
dma_addr_t vf_bulletin; dma_addr_t vf_bulletin;
/* PF saves a copy of the last VF acquire message */
struct vfpf_acquire_tlv acquire;
u32 concrete_fid; u32 concrete_fid;
u16 opaque_fid; u16 opaque_fid;
u16 mtu; u16 mtu;
......
...@@ -117,36 +117,64 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) ...@@ -117,36 +117,64 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
} }
#define VF_ACQUIRE_THRESH 3 #define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
p_req->num_rxqs,
p_resp->num_rxqs,
p_req->num_rxqs,
p_resp->num_txqs,
p_req->num_sbs,
p_resp->num_sbs,
p_req->num_mac_filters,
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
/* humble our request */
p_req->num_txqs = p_resp->num_txqs;
p_req->num_rxqs = p_resp->num_rxqs;
p_req->num_sbs = p_resp->num_sbs;
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
u8 rx_count = 1, tx_count = 1, num_sbs = 1; struct vf_pf_resc_request *p_resc;
u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
bool resources_acquired = false; bool resources_acquired = false;
struct vfpf_acquire_tlv *req; struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0; int rc = 0, attempts = 0;
/* clear mailbox and prep first tlv */ /* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
p_resc = &req->resc_request;
/* starting filling the request */ /* starting filling the request */
req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
req->resc_request.num_rxqs = rx_count; p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
req->resc_request.num_txqs = tx_count; p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
req->resc_request.num_sbs = num_sbs; p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
req->resc_request.num_mac_filters = num_mac; p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION; req->vfdev_info.fw_major = FW_MAJOR_VERSION;
req->vfdev_info.fw_minor = FW_MINOR_VERSION; req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req->vfdev_info.fw_revision = FW_REVISION_VERSION; req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
/* Fill capability field with any non-deprecated config we support */ /* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
...@@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) ...@@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
resources_acquired = true; resources_acquired = true;
} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
attempts < VF_ACQUIRE_THRESH) { attempts < VF_ACQUIRE_THRESH) {
DP_VERBOSE(p_hwfn, qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
QED_MSG_IOV, &resp->resc);
"PF unwilling to fullfill resource request. Try PF recommended amount\n");
/* humble our request */
req->resc_request.num_txqs = resp->resc.num_txqs;
req->resc_request.num_rxqs = resp->resc.num_rxqs;
req->resc_request.num_sbs = resp->resc.num_sbs;
req->resc_request.num_mac_filters =
resp->resc.num_mac_filters;
req->resc_request.num_vlan_filters =
resp->resc.num_vlan_filters;
/* Clear response buffer */ /* Clear response buffer */
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
} else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
pfdev_info->major_fp_hsi &&
(pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
DP_NOTICE(p_hwfn,
"PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
pfdev_info->major_fp_hsi,
pfdev_info->minor_fp_hsi,
ETH_HSI_VER_MAJOR,
ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
return -EINVAL;
} else { } else {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n", "PF returned error %d to VF acquisition request\n",
...@@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) ...@@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
} }
} }
if (ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
}
return 0; return 0;
} }
...@@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, ...@@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell) u16 pbl_size, void __iomem **pp_doorbell)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req; struct vfpf_start_txq_tlv *req;
struct pfvf_def_resp_tlv *resp;
int rc; int rc;
/* clear mailbox and prep first tlv */ /* clear mailbox and prep first tlv */
...@@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, ...@@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
qed_add_tlv(p_hwfn, &p_iov->offset, qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp; resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc) if (rc)
return rc; goto exit;
if (resp->hdr.status != PFVF_STATUS_SUCCESS) if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
return -EINVAL; rc = -EINVAL;
goto exit;
}
if (pp_doorbell) { if (pp_doorbell) {
u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + DP_VERBOSE(p_hwfn, QED_MSG_IOV,
qed_db_addr(cid, DQ_DEMS_LEGACY); "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
tx_queue_id, *pp_doorbell, resp->offset);
} }
exit:
return rc; return rc;
} }
......
...@@ -96,7 +96,9 @@ struct vfpf_acquire_tlv { ...@@ -96,7 +96,9 @@ struct vfpf_acquire_tlv {
u32 driver_version; u32 driver_version;
u16 opaque_fid; /* ME register value */ u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
u8 padding[5]; u8 eth_fp_hsi_major;
u8 eth_fp_hsi_minor;
u8 padding[3];
} vfdev_info; } vfdev_info;
struct vf_pf_resc_request resc_request; struct vf_pf_resc_request resc_request;
...@@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv { ...@@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv {
struct pfvf_stats_info stats_info; struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN]; u8 port_mac[ETH_ALEN];
u8 padding2[2];
/* It's possible PF had to configure an older fastpath HSI
* [in case VF is newer than PF]. This is communicated back
* to the VF. It can also be used in case of error due to
* non-matching versions to shed light in VF about failure.
*/
u8 major_fp_hsi;
u8 minor_fp_hsi;
} pfdev_info; } pfdev_info;
struct pf_vf_resc { struct pf_vf_resc {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment