Commit a3f5aa90 authored by Alan Brady, committed by Jeff Kirsher

i40e: Enable VF to negotiate number of allocated queues

Currently the PF allocates a default number of queues to each VF, and this
number cannot be changed.  This patch enables the VF to request a different
number of queues be allocated to it.  This patch also adds a new virtchnl
op and capability flag to facilitate this negotiation.

After the PF receives a request message, it will set a requested number
of queues for that VF.  Then when the VF resets, its VSI will get a new
number of queues allocated to it.

This is a best effort request: since we only allocate a guaranteed default
number, if the VF asks for more than the guaranteed number, there may not
be enough queues left in HW to accommodate it unless queues assigned to
other VFs are freed.  It should also be noted that decreasing the number of
queues allocated to a VF below the default will NOT enable the allocation
of more than 32 VFs per PF and will not free the queues guaranteed to each
VF by default.
Signed-off-by: Alan Brady <alan.brady@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent c97fc9b6
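
Note that only the PF-side handler for the new op is part of this series; the
VF driver still has to send the message.  As a rough sketch of how a VF driver
might use the op once it sees the capability bit — send_pf_msg() and struct
vf_adapter are illustrative stand-ins for the VF's real mailbox plumbing, not
code from this patch:

	/* Hypothetical VF-side sketch; names are illustrative only. */
	static int vf_request_queues(struct vf_adapter *adapter, u16 num)
	{
		struct virtchnl_vf_res_request vfres = {};

		/* Only valid if the PF advertised the capability in its
		 * VIRTCHNL_OP_GET_VF_RESOURCES reply.
		 */
		if (!(adapter->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES))
			return -EOPNOTSUPP;

		vfres.num_queue_pairs = num;

		/* Best effort: the PF replies with either the requested count
		 * or the maximum it can support, and the new count only takes
		 * effect once the VF resets and re-reads its resources.
		 */
		return send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
				   (u8 *)&vfres, sizeof(vfres));
	}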
@@ -77,6 +77,7 @@
 #define i40e_default_queues_per_vmdq(pf) \
 		(((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_MAX_VF_QUEUES 16
 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
 #define i40e_pf_get_max_q_per_tc(pf) \
 		(((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
......
@@ -815,6 +815,14 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
 	 */
 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
 
+	/* It's possible the VF had requested more queues than the default so
+	 * do the accounting here when we're about to free them.
+	 */
+	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
+		pf->queues_left += vf->num_queue_pairs -
+				   I40E_DEFAULT_QUEUES_PER_VF;
+	}
+
 	/* free vsi & disconnect it from the parent uplink */
 	if (vf->lan_vsi_idx) {
 		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
@@ -868,12 +876,27 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
 	int total_queue_pairs = 0;
 	int ret;
 
+	if (vf->num_req_queues &&
+	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
+		pf->num_vf_qps = vf->num_req_queues;
+	else
+		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+
 	/* allocate hw vsi context & associated resources */
 	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 	if (ret)
 		goto error_alloc;
 	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 
+	/* We account for each VF to get a default number of queue pairs.  If
+	 * the VF has now requested more, we need to account for that to make
+	 * certain we never request more queues than we actually have left in
+	 * HW.
+	 */
+	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
+		pf->queues_left -=
+			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
+
 	if (vf->trusted)
 		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 	else
@@ -1579,6 +1602,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 			VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
 	}
 
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1986,6 +2012,52 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 				      aq_ret);
 }
 
+/**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number.  Will respond with either the number requested or the
+ * maximum we can support.
+ **/
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+{
+	struct virtchnl_vf_res_request *vfres =
+		(struct virtchnl_vf_res_request *)msg;
+	int req_pairs = vfres->num_queue_pairs;
+	int cur_pairs = vf->num_queue_pairs;
+	struct i40e_pf *pf = vf->pf;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+		return -EINVAL;
+
+	if (req_pairs <= 0) {
+		dev_err(&pf->pdev->dev,
+			"VF %d tried to request %d queues.  Ignoring.\n",
+			vf->vf_id, req_pairs);
+	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
+		dev_err(&pf->pdev->dev,
+			"VF %d tried to request more than %d queues.\n",
+			vf->vf_id,
+			I40E_MAX_VF_QUEUES);
+		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
+	} else if (req_pairs - cur_pairs > pf->queues_left) {
+		dev_warn(&pf->pdev->dev,
+			 "VF %d requested %d more queues, but only %d left.\n",
+			 vf->vf_id,
+			 req_pairs - cur_pairs,
+			 pf->queues_left);
+		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+	} else {
+		vf->num_req_queues = req_pairs;
+	}
+
+	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+				      (u8 *)vfres, sizeof(*vfres));
+}
+
 /**
  * i40e_vc_get_stats_msg
  * @vf: pointer to the VF info
@@ -2708,6 +2780,9 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
 		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
 		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+		break;
 	case VIRTCHNL_OP_UNKNOWN:
 	default:
......
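
To make the queue accounting concrete, here is a standalone restatement of
the handler's clamping policy for a valid nonzero request, with illustrative
numbers; clamp_vf_queue_request() is a made-up helper for this sketch, not
driver code:

	#include <stdint.h>

	/* Mirrors the decision chain in i40e_vc_request_queues_msg() above,
	 * with I40E_MAX_VF_QUEUES hard-coded to 16 as in this patch.
	 */
	static uint16_t clamp_vf_queue_request(uint16_t req, uint16_t cur,
					       uint16_t queues_left)
	{
		if (req > 16)
			return 16;		/* per-VF hard cap */
		if (req > cur && req - cur > queues_left)
			return queues_left + cur;	/* all that's left */
		return req;			/* request can be honored */
	}

For example, a VF that owns 4 queue pairs and asks for 16 while only 6 are
unallocated PF-wide gets clamp_vf_queue_request(16, 4, 6) == 10: the PF's
reply offers 10, and the VF may then re-request 10 and reset to take it.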
@@ -97,6 +97,7 @@ struct i40e_vf {
 	u16 lan_vsi_id;		/* ID as used by firmware */
 	u8 num_queue_pairs;	/* num of qps assigned to VF vsis */
+	u8 num_req_queues;	/* num of requested qps */
 	u64 num_mdd_events;	/* num of mdd events detected */
 	/* num of continuous malformed or invalid msgs detected */
 	u64 num_invalid_msgs;
......
@@ -135,6 +135,7 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_SET_RSS_HENA = 26,
 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+	VIRTCHNL_OP_REQUEST_QUEUES = 29,
 };
 
 /* This macro is used to generate a compilation error if a structure
@@ -235,6 +236,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
 #define VIRTCHNL_VF_OFFLOAD_RSS_REG		0x00000010
 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		0x00000040
 #define VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		0x00020000
 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
@@ -325,6 +327,21 @@ struct virtchnl_vsi_queue_config_info {
 	struct virtchnl_queue_pair_info qpair[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
 
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF.  Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated.  This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+	u16 num_queue_pairs;
+};
+
 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
@@ -691,6 +708,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
 		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		valid_len = sizeof(struct virtchnl_vf_res_request);
+		break;
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
......
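
The wire format of the new op is deliberately tiny: a 2-byte payload that
virtchnl_vc_validate_vf_msg() now requires to be exactly
sizeof(struct virtchnl_vf_res_request).  A self-contained userspace
illustration, mirroring the two definitions this patch adds (local copies
only, not the kernel header):

	#include <stdint.h>
	#include <stdio.h>

	#define VIRTCHNL_OP_REQUEST_QUEUES 29

	struct virtchnl_vf_res_request {
		uint16_t num_queue_pairs;
	};

	int main(void)
	{
		struct virtchnl_vf_res_request req = { .num_queue_pairs = 8 };

		/* valid_len for opcode 29 is sizeof(req); any other msglen
		 * is rejected as a malformed message.
		 */
		printf("opcode %d: %zu-byte payload requesting %u queue pairs\n",
		       VIRTCHNL_OP_REQUEST_QUEUES, sizeof(req),
		       req.num_queue_pairs);
		return 0;
	}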