Commit 6e7da286 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-06-01

This series contains updates to i40e, i40evf and the "new" AVF virtchnl.

This is the introduction of the Intel(R) Ethernet Adaptive Virtual
Function driver code and device ID, as presented at the NetDEV 1.2
conference in 2016.
http://netdevconf.org/1.2/session.html?anjali-singhai

The idea is to convert the interface between the i40evf driver
and the parent i40e PF driver to be generic, as the i40evf driver
should in the future be able to run on top of other Intel PF
drivers, and negotiate any features beyond a "base expected" set.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b2608311 85cfa717
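
A minimal sketch of the "base expected" capability negotiation the cover letter describes, added as a reader's aid: the flag values are taken from the virtchnl offload definitions in the diff below, while the standalone program and its names are illustrative only, not the driver code.

/* Illustrative only: a VF requests a capability bitmap with
 * GET_VF_RESOURCES and enables just the features the PF grants back.
 */
#include <stdint.h>
#include <stdio.h>

#define VF_OFFLOAD_L2         0x00000001u   /* values from i40e_virtchnl.h below */
#define VF_OFFLOAD_VLAN       0x00010000u
#define VF_OFFLOAD_RX_POLLING 0x00020000u
#define VF_OFFLOAD_RSS_PF     0x00080000u

/* Base mode: what every conforming PF is expected to grant. */
#define VF_BASE_MODE_OFFLOADS (VF_OFFLOAD_L2 | VF_OFFLOAD_VLAN | VF_OFFLOAD_RSS_PF)

int main(void)
{
	uint32_t requested = VF_BASE_MODE_OFFLOADS | VF_OFFLOAD_RX_POLLING;
	uint32_t granted   = VF_BASE_MODE_OFFLOADS;  /* pretend PF reply */

	/* The VF only turns on features present in both bitmaps. */
	uint32_t negotiated = requested & granted;

	printf("negotiated caps: 0x%08x\n", negotiated);
	return 0;
}
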
 Linux* Base Driver for Intel(R) Network Connection
 ==================================================
-Intel XL710 X710 Virtual Function Linux driver.
-Copyright(c) 2013 Intel Corporation.
+Intel Ethernet Adaptive Virtual Function Linux driver.
+Copyright(c) 2013-2017 Intel Corporation.
 Contents
 ========
@@ -11,19 +11,26 @@ Contents
 - Known Issues/Troubleshooting
 - Support
-This file describes the i40evf Linux* Base Driver for the Intel(R) XL710
-X710 Virtual Function.
-The i40evf driver supports XL710 and X710 virtual function devices that
-can only be activated on kernels with CONFIG_PCI_IOV enabled.
+This file describes the i40evf Linux* Base Driver.
+The i40evf driver supports the below mentioned virtual function
+devices and can only be activated on kernels running the i40e or
+newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV.
+The i40evf driver requires CONFIG_PCI_MSI to be enabled.
 The guest OS loading the i40evf driver must support MSI-X interrupts.
+Supported Hardware
+==================
+Intel XL710 X710 Virtual Function
+Intel Ethernet Adaptive Virtual Function
+Intel X722 Virtual Function
 Identifying Your Adapter
 ========================
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
+For more information on how to identify your adapter, go to the
+Adapter & Driver ID Guide at:
 http://support.intel.com/support/go/network/adapter/idguide.htm
......
@@ -6738,6 +6738,7 @@ F: Documentation/networking/i40e.txt
 F: Documentation/networking/i40evf.txt
 F: drivers/net/ethernet/intel/
 F: drivers/net/ethernet/intel/*/
+F: include/linux/avf/virtchnl.h
 INTEL RDMA RNIC DRIVER
 M: Faisal Latif <faisal.latif@intel.com>
......
@@ -236,12 +236,14 @@ config I40E_DCB
 If unsure, say N.
 config I40EVF
-tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+tristate "Intel(R) Ethernet Adaptive Virtual Function support"
 depends on PCI_MSI
 ---help---
-This driver supports Intel(R) XL710 and X710 virtual functions.
-For more information on how to identify your adapter, go to the
-Adapter & Driver ID Guide that can be located at:
+This driver supports virtual functions for Intel XL710,
+X710, X722, and all devices advertising support for Intel
+Ethernet Adaptive Virtual Function devices. For more
+information on how to identify your adapter, go to the Adapter
+& Driver ID Guide that can be located at:
 <http://support.intel.com>
......
@@ -57,7 +57,7 @@
 #include "i40e_type.h"
 #include "i40e_prototype.h"
 #include "i40e_client.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
 #include "i40e_virtchnl_pf.h"
 #include "i40e_txrx.h"
 #include "i40e_dcb.h"
......
@@ -565,7 +565,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
 struct i40e_hw *hw = &pf->hw;
 i40e_status err;
-err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
+err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
 0, msg, len, NULL);
 if (err)
 dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
......
@@ -27,7 +27,7 @@
 #include "i40e_type.h"
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
 /**
 * i40e_set_mac_type - Sets MAC type
......
@@ -29,7 +29,7 @@
 #include "i40e_type.h"
 #include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
 /* Prototypes for shared code functions that are not in
 * the standard function pointer structures. These are
@@ -333,10 +333,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
 /* i40e_common for VF drivers*/
 void i40e_vf_parse_hw_config(struct i40e_hw *hw,
-struct i40e_virtchnl_vf_resource *msg);
+struct virtchnl_vf_resource *msg);
 i40e_status i40e_vf_reset(struct i40e_hw *hw);
 i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
-enum i40e_virtchnl_ops v_opcode,
+enum virtchnl_ops v_opcode,
 i40e_status v_retval,
 u8 *msg, u16 msglen,
 struct i40e_asq_cmd_details *cmd_details);
......
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_VIRTCHNL_H_
#define _I40E_VIRTCHNL_H_
#include "i40e_type.h"
/* Description:
* This header file describes the VF-PF communication protocol used
* by the various i40e drivers.
*
* Admin queue buffer usage:
* desc->opcode is always i40e_aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* Firmware copies the cookie fields when sending messages between the PF and
* VF, but uses all other fields internally. Due to this limitation, we
* must send all messages as "indirect", i.e. using an external buffer.
*
* All the vsi indexes are relative to the VF. Each VF can have maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value is of
* i40e_status_code type, defined in the i40e_type.h.
*
* In general, VF driver initialization should roughly follow the order of these
* opcodes. The VF driver must first validate the API version of the PF driver,
* then request a reset, then get resources, then configure queues and
* interrupts. After these operations are complete, the VF driver may start
* its queues, optionally add MAC and VLAN filters, and process traffic.
*/
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
/* The PF sends status change events to VFs using
* the I40E_VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
I40E_VIRTCHNL_OP_RESET_VF = 2,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
I40E_VIRTCHNL_OP_ADD_VLAN = 12,
I40E_VIRTCHNL_OP_DEL_VLAN = 13,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
};
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct i40e_virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
i40e_status v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
/* Message descriptions and data structures.*/
/* I40E_VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
#define I40E_VIRTCHNL_VERSION_MINOR 1
#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
};
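/* Illustrative sketch (editor's example, not part of this header): acting on
 * the PF's version reply according to the rules above. A major mismatch is
 * fatal; a minor mismatch is tolerated but should be logged by the caller.
 */
static inline int example_check_api_version(const struct i40e_virtchnl_version_info *pf_ver)
{
	if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
		return -1;	/* major mismatch: VF cannot operate */
	if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
		return 1;	/* minor mismatch: usable, warn in the system log */
	return 0;		/* exact match */
}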
/* I40E_VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
*/
struct i40e_virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_offload_flags;
u32 rss_key_size;
u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of i40e_virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct i40e_virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled;
u64 dma_ring_addr;
u64 dma_headwb_addr;
};
/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of i40e_virtchnl_rxq_info.
* PF configures requested queue and returns a status code.
*/
/* Rx queue config info */
struct i40e_virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled;
u32 databuffer_size;
u32 max_pkt_size;
u64 dma_ring_addr;
enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
};
/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for all active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
*/
struct i40e_virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct i40e_virtchnl_txq_info txq;
struct i40e_virtchnl_rxq_info rxq;
};
struct i40e_virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
struct i40e_virtchnl_queue_pair_info qpair[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0.
* PF configures interrupt mapping and returns status.
*/
struct i40e_virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
struct i40e_virtchnl_irq_map_info {
u16 num_vectors;
struct i40e_virtchnl_vector_map vecmap[1];
};
/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
* I40E_VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these message to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
*/
struct i40e_virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
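/* Illustrative sketch (editor's example, not part of this header): filling a
 * queue_select for ENABLE/DISABLE_QUEUES. rx_queues and tx_queues are
 * bitmaps, so queue N is selected by setting bit N.
 */
static inline void example_select_first_n_queues(struct i40e_virtchnl_queue_select *vqs,
						 u16 vsi_id, u8 num_queues)
{
	vqs->vsi_id = vsi_id;
	vqs->pad = 0;
	vqs->rx_queues = (u32)(((u64)1 << num_queues) - 1);	/* bits 0..n-1 */
	vqs->tx_queues = vqs->rx_queues;
}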
/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct i40e_virtchnl_ether_addr {
u8 addr[ETH_ALEN];
u8 pad[2];
};
struct i40e_virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct i40e_virtchnl_ether_addr list[1];
};
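/* Illustrative sketch (editor's example, not part of this header): messages
 * that end in a one-element array are sent as variable-length buffers, so the
 * buffer size is the struct size plus (num_elements - 1) trailing entries.
 * Assumes num_elements >= 1, as the protocol requires.
 */
static inline size_t example_ether_addr_list_size(u16 num_elements)
{
	return sizeof(struct i40e_virtchnl_ether_addr_list) +
	       (num_elements - 1) * sizeof(struct i40e_virtchnl_ether_addr);
}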
/* I40E_VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* I40E_VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct i40e_virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct i40e_virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
/* I40E_VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
* the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
* PF replies with struct i40e_eth_stats in an external buffer.
*/
/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
* I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the RSS fields in
* the VF resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct i40e_virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
struct i40e_virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table*/
};
/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
* I40E_VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
* Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
*/
struct i40e_virtchnl_rss_hena {
u64 hena;
};
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum i40e_virtchnl_event_codes {
I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
I40E_VIRTCHNL_EVENT_LINK_CHANGE,
I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define I40E_PF_EVENT_SEVERITY_INFO 0
#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct i40e_virtchnl_pf_event {
enum i40e_virtchnl_event_codes event;
union {
struct {
enum i40e_aq_link_speed link_speed;
bool link_status;
} link_event;
} event_data;
int severity;
};
/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
* VF uses this message to request PF to map IWARP vectors to IWARP queues.
* The request for this originates from the VF IWARP driver through
* a client interface between VF LAN and VF IWARP driver.
* A vector could have an AEQ and CEQ attached to it although
* there is a single AEQ per VF IWARP instance in which case
* most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
* There will never be a case where there will be multiple CEQs attached
* to a single vector.
* PF configures interrupt mapping and returns status.
*/
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
#define I40E_QUEUE_TYPE_PE_AEQ 0x80
#define I40E_QUEUE_INVALID_IDX 0xFFFF
struct i40e_virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
struct i40e_virtchnl_iwarp_qvlist_info {
u32 num_vectors;
struct i40e_virtchnl_iwarp_qv_info qv_info[1];
};
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked
* will result in 3.
*/
enum i40e_vfr_states {
I40E_VFR_INPROGRESS = 0,
I40E_VFR_COMPLETED,
I40E_VFR_VFACTIVE,
I40E_VFR_UNKNOWN,
};
#endif /* _I40E_VIRTCHNL_H_ */
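The initialization and reset flow spelled out in the header above can be condensed into a short sketch. This is an editor's illustration, not the i40evf implementation: send_to_pf() and read_rstat() are hypothetical stand-ins for the admin-queue transport and the VFGEN_RSTAT register read, while the opcode numbers and reset states come from the enums above.

/* Sketch of the VF bring-up order: validate version, reset, poll the reset
 * status (masking the DEADBEEF value returned while hardware is in reset),
 * then get resources, configure queues and interrupts, and enable queues.
 */
#include <stdint.h>
#include <stdio.h>

enum vfr_states { VFR_INPROGRESS = 0, VFR_COMPLETED, VFR_VFACTIVE, VFR_UNKNOWN };

static void send_to_pf(int opcode) { printf("send opcode %d\n", opcode); }

static uint32_t read_rstat(void)
{
	static int calls;
	return (calls++ < 2) ? 0xDEADBEEF : VFR_VFACTIVE;	/* pretend hardware */
}

int main(void)
{
	send_to_pf(1);		/* OP_VERSION: validate the PF's API version first */
	send_to_pf(2);		/* OP_RESET_VF: the PF sends no reply for this one */

	/* Poll the reset status; the low bits of DEADBEEF mask to VFR_UNKNOWN (3). */
	uint32_t state;
	do {
		state = read_rstat() & 3;
	} while (state != VFR_COMPLETED && state != VFR_VFACTIVE);

	send_to_pf(3);		/* OP_GET_VF_RESOURCES (with a capability bitmap) */
	send_to_pf(6);		/* OP_CONFIG_VSI_QUEUES */
	send_to_pf(7);		/* OP_CONFIG_IRQ_MAP */
	send_to_pf(8);		/* OP_ENABLE_QUEUES, then add filters and pass traffic */
	return 0;
}
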
@@ -39,7 +39,7 @@
 * send a message to all VFs on a given PF
 **/
 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
-enum i40e_virtchnl_ops v_opcode,
+enum virtchnl_ops v_opcode,
 i40e_status v_retval, u8 *msg,
 u16 msglen)
 {
@@ -70,14 +70,14 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 **/
 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 {
-struct i40e_virtchnl_pf_event pfe;
+struct virtchnl_pf_event pfe;
 struct i40e_pf *pf = vf->pf;
 struct i40e_hw *hw = &pf->hw;
 struct i40e_link_status *ls = &pf->hw.phy.link_info;
 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
-pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
-pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+pfe.severity = PF_EVENT_SEVERITY_INFO;
 if (vf->link_forced) {
 pfe.event_data.link_event.link_status = vf->link_up;
 pfe.event_data.link_event.link_speed =
@@ -85,9 +85,10 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 } else {
 pfe.event_data.link_event.link_status =
 ls->link_info & I40E_AQ_LINK_UP;
-pfe.event_data.link_event.link_speed = ls->link_speed;
+pfe.event_data.link_event.link_speed =
+(enum virtchnl_link_speed)ls->link_speed;
 }
-i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 0, (u8 *)&pfe, sizeof(pfe), NULL);
 }
@@ -113,12 +114,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
 **/
 void i40e_vc_notify_reset(struct i40e_pf *pf)
 {
-struct i40e_virtchnl_pf_event pfe;
+struct virtchnl_pf_event pfe;
-pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
-(u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
+(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
 }
 /**
@@ -129,7 +130,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 **/
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 {
-struct i40e_virtchnl_pf_event pfe;
+struct virtchnl_pf_event pfe;
 int abs_vf_id;
 /* validate the request */
@@ -143,11 +144,11 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
-pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 0, (u8 *)&pfe,
-sizeof(struct i40e_virtchnl_pf_event), NULL);
+sizeof(struct virtchnl_pf_event), NULL);
 }
 /***********************misc routines*****************************/
@@ -250,7 +251,7 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 * configure irq link list from the map
 **/
 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
-struct i40e_virtchnl_vector_map *vecmap)
+struct virtchnl_vector_map *vecmap)
 {
 unsigned long linklistmap = 0, tempmap;
 struct i40e_pf *pf = vf->pf;
@@ -338,7 +339,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 /* if the vf is running in polling mode and using interrupt zero,
 * need to disable auto-mask on enabling zero interrupt for VFs.
 */
-if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
 (vector_id == 0)) {
 reg = rd32(hw, I40E_GLINT_CTL);
 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
@@ -359,7 +360,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
 {
 struct i40e_pf *pf = vf->pf;
-struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
 u32 msix_vf;
 u32 i;
@@ -368,7 +369,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 for (i = 0; i < qvlist_info->num_vectors; i++) {
-struct i40e_virtchnl_iwarp_qv_info *qv_info;
+struct virtchnl_iwarp_qv_info *qv_info;
 u32 next_q_index, next_q_type;
 struct i40e_hw *hw = &pf->hw;
 u32 v_idx, reg_idx, reg;
@@ -409,17 +410,17 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
 * Return 0 on success or < 0 on error
 **/
 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
-struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
+struct virtchnl_iwarp_qvlist_info *qvlist_info)
 {
 struct i40e_pf *pf = vf->pf;
 struct i40e_hw *hw = &pf->hw;
-struct i40e_virtchnl_iwarp_qv_info *qv_info;
+struct virtchnl_iwarp_qv_info *qv_info;
 u32 v_idx, i, reg_idx, reg;
 u32 next_q_idx, next_q_type;
 u32 msix_vf, size;
-size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
-(sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+(sizeof(struct virtchnl_iwarp_qv_info) *
 (qvlist_info->num_vectors - 1));
 vf->qvlist_info = kzalloc(size, GFP_KERNEL);
 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
@@ -492,7 +493,7 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
 **/
 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 u16 vsi_queue_id,
-struct i40e_virtchnl_txq_info *info)
+struct virtchnl_txq_info *info)
 {
 struct i40e_pf *pf = vf->pf;
 struct i40e_hw *hw = &pf->hw;
@@ -569,7 +570,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 **/
 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 u16 vsi_queue_id,
-struct i40e_virtchnl_rxq_info *info)
+struct virtchnl_rxq_info *info)
 {
 struct i40e_pf *pf = vf->pf;
 struct i40e_hw *hw = &pf->hw;
@@ -1017,7 +1018,7 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
 * after VF has been fully initialized, because the VF driver may
 * request resources immediately after setting this flag.
 */
-wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
 }
 /**
@@ -1461,7 +1462,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 * send resp msg to VF
 **/
 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
-enum i40e_virtchnl_ops opcode,
+enum virtchnl_ops opcode,
 i40e_status retval)
 {
 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
@@ -1475,18 +1476,17 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
 **/
 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
-struct i40e_virtchnl_version_info info = {
-I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+struct virtchnl_version_info info = {
+VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
 };
-vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+vf->vf_ver = *(struct virtchnl_version_info *)msg;
 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
-if (VF_IS_V10(vf))
-info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
-return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+if (VF_IS_V10(&vf->vf_ver))
+info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
 I40E_SUCCESS, (u8 *)&info,
-sizeof(struct
-i40e_virtchnl_version_info));
+sizeof(struct virtchnl_version_info));
 }
 /**
@@ -1499,7 +1499,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 **/
 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
-struct i40e_virtchnl_vf_resource *vfres = NULL;
+struct virtchnl_vf_resource *vfres = NULL;
 struct i40e_pf *pf = vf->pf;
 i40e_status aq_ret = 0;
 struct i40e_vsi *vsi;
@@ -1512,8 +1512,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 goto err;
 }
-len = (sizeof(struct i40e_virtchnl_vf_resource) +
-sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+len = (sizeof(struct virtchnl_vf_resource) +
+sizeof(struct virtchnl_vsi_resource) * num_vsis);
 vfres = kzalloc(len, GFP_KERNEL);
 if (!vfres) {
@@ -1521,50 +1521,48 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 len = 0;
 goto err;
 }
-if (VF_IS_V11(vf))
+if (VF_IS_V11(&vf->vf_ver))
 vf->driver_caps = *(u32 *)msg;
 else
-vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
-I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
-I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+VIRTCHNL_VF_OFFLOAD_RSS_REG |
+VIRTCHNL_VF_OFFLOAD_VLAN;
+vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2;
 vsi = pf->vsi[vf->lan_vsi_idx];
 if (!vsi->info.pvid)
-vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
 if (i40e_vf_client_capable(pf, vf->vf_id) &&
-(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
-vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
+(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
 }
-if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
-vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
+if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
 } else {
 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
-(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
-vfres->vf_offload_flags |=
-I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
 else
-vfres->vf_offload_flags |=
-I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
 }
 if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
-if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
 vfres->vf_offload_flags |=
-I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
 }
-if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP)
-vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP;
+if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
 if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
-(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
-vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
-if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
 dev_err(&pf->pdev->dev,
 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
@@ -1572,13 +1570,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 ret = I40E_ERR_PARAM;
 goto err;
 }
-vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
 }
 if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
-if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
 vfres->vf_offload_flags |=
-I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
 }
 vfres->num_vsis = num_vsis;
@@ -1589,7 +1587,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 if (vf->lan_vsi_idx) {
 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
-vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
 /* VFs only use TC 0 */
 vfres->vsi_res[0].qset_handle
@@ -1601,7 +1599,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 err:
 /* send the response back to the VF */
-ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
 aq_ret, (u8 *)vfres, len);
 kfree(vfres);
@@ -1655,8 +1653,8 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_promisc_info *info =
-(struct i40e_virtchnl_promisc_info *)msg;
+struct virtchnl_promisc_info *info =
+(struct virtchnl_promisc_info *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_hw *hw = &pf->hw;
 struct i40e_mac_filter *f;
@@ -1683,7 +1681,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 goto error_param;
 }
 /* Multicast promiscuous handling*/
-if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+if (info->flags & FLAG_VF_MULTICAST_PROMISC)
 allmulti = true;
 if (vf->port_vlan_id) {
@@ -1734,7 +1732,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
 }
-if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+if (info->flags & FLAG_VF_UNICAST_PROMISC)
 alluni = true;
 if (vf->port_vlan_id) {
 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
@@ -1788,7 +1786,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 error_param:
 /* send the response to the VF */
 return i40e_vc_send_resp_to_vf(vf,
-I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
 aq_ret);
 }
@@ -1803,9 +1801,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 **/
 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_vsi_queue_config_info *qci =
-(struct i40e_virtchnl_vsi_queue_config_info *)msg;
-struct i40e_virtchnl_queue_pair_info *qpi;
+struct virtchnl_vsi_queue_config_info *qci =
+(struct virtchnl_vsi_queue_config_info *)msg;
+struct virtchnl_queue_pair_info *qpi;
 struct i40e_pf *pf = vf->pf;
 u16 vsi_id, vsi_queue_id;
 i40e_status aq_ret = 0;
@@ -1845,7 +1843,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
 aq_ret);
 }
@@ -1860,9 +1858,9 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_irq_map_info *irqmap_info =
-(struct i40e_virtchnl_irq_map_info *)msg;
-struct i40e_virtchnl_vector_map *map;
+struct virtchnl_irq_map_info *irqmap_info =
+(struct virtchnl_irq_map_info *)msg;
+struct virtchnl_vector_map *map;
 u16 vsi_id, vsi_queue_id, vector_id;
 i40e_status aq_ret = 0;
 unsigned long tempmap;
@@ -1908,7 +1906,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 }
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
 aq_ret);
 }
@@ -1922,8 +1920,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_queue_select *vqs =
-(struct i40e_virtchnl_queue_select *)msg;
+struct virtchnl_queue_select *vqs =
+(struct virtchnl_queue_select *)msg;
 struct i40e_pf *pf = vf->pf;
 u16 vsi_id = vqs->vsi_id;
 i40e_status aq_ret = 0;
@@ -1947,7 +1945,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 aq_ret = I40E_ERR_TIMEOUT;
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
 aq_ret);
 }
@@ -1962,8 +1960,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_queue_select *vqs =
-(struct i40e_virtchnl_queue_select *)msg;
+struct virtchnl_queue_select *vqs =
+(struct virtchnl_queue_select *)msg;
 struct i40e_pf *pf = vf->pf;
 i40e_status aq_ret = 0;
@@ -1986,7 +1984,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
 aq_ret);
 }
@@ -2000,8 +1998,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_queue_select *vqs =
-(struct i40e_virtchnl_queue_select *)msg;
+struct virtchnl_queue_select *vqs =
+(struct virtchnl_queue_select *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_eth_stats stats;
 i40e_status aq_ret = 0;
@@ -2029,7 +2027,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response back to the VF */
-return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
 (u8 *)&stats, sizeof(stats));
 }
@@ -2088,8 +2086,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
 **/
 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_ether_addr_list *al =
-(struct i40e_virtchnl_ether_addr_list *)msg;
+struct virtchnl_ether_addr_list *al =
+(struct virtchnl_ether_addr_list *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
 u16 vsi_id = al->vsi_id;
@@ -2143,7 +2141,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
 ret);
 }
@@ -2157,8 +2155,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_ether_addr_list *al =
-(struct i40e_virtchnl_ether_addr_list *)msg;
+struct virtchnl_ether_addr_list *al =
+(struct virtchnl_ether_addr_list *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
 u16 vsi_id = al->vsi_id;
@@ -2203,7 +2201,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
 ret);
 }
@@ -2217,8 +2215,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_vlan_filter_list *vfl =
-(struct i40e_virtchnl_vlan_filter_list *)msg;
+struct virtchnl_vlan_filter_list *vfl =
+(struct virtchnl_vlan_filter_list *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
 u16 vsi_id = vfl->vsi_id;
@@ -2277,7 +2275,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
 }
 /**
@@ -2290,8 +2288,8 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 **/
 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_vlan_filter_list *vfl =
-(struct i40e_virtchnl_vlan_filter_list *)msg;
+struct virtchnl_vlan_filter_list *vfl =
+(struct virtchnl_vlan_filter_list *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
 u16 vsi_id = vfl->vsi_id;
@@ -2335,7 +2333,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
 }
 /**
@@ -2363,7 +2361,7 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
 aq_ret);
 }
@@ -2379,8 +2377,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
 bool config)
 {
-struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
-(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+struct virtchnl_iwarp_qvlist_info *qvlist_info =
+(struct virtchnl_iwarp_qvlist_info *)msg;
 i40e_status aq_ret = 0;
 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
@@ -2399,8 +2397,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
 error_param:
 /* send the response to the VF */
 return i40e_vc_send_resp_to_vf(vf,
-config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
-I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
 aq_ret);
 }
@@ -2414,8 +2412,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
 **/
 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
-struct i40e_virtchnl_rss_key *vrk =
-(struct i40e_virtchnl_rss_key *)msg;
+struct virtchnl_rss_key *vrk =
+(struct virtchnl_rss_key *)msg;
 struct i40e_pf *pf = vf->pf;
 struct i40e_vsi *vsi = NULL;
 u16 vsi_id = vrk->vsi_id;
@@ -2432,7 +2430,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
 err:
 /* send the response to the VF */
-return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
 aq_ret);
 }
...@@ -2446,8 +2444,8 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2446,8 +2444,8 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
**/ **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{ {
struct i40e_virtchnl_rss_lut *vrl = struct virtchnl_rss_lut *vrl =
(struct i40e_virtchnl_rss_lut *)msg; (struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf; struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL; struct i40e_vsi *vsi = NULL;
u16 vsi_id = vrl->vsi_id; u16 vsi_id = vrl->vsi_id;
...@@ -2464,7 +2462,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2464,7 +2462,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
/* send the response to the VF */ /* send the response to the VF */
err: err:
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
aq_ret); aq_ret);
} }
...@@ -2478,7 +2476,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2478,7 +2476,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
**/ **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{ {
struct i40e_virtchnl_rss_hena *vrh = NULL; struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf; struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
int len = 0; int len = 0;
...@@ -2487,7 +2485,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2487,7 +2485,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto err; goto err;
} }
len = sizeof(struct i40e_virtchnl_rss_hena); len = sizeof(struct virtchnl_rss_hena);
vrh = kzalloc(len, GFP_KERNEL); vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) { if (!vrh) {
...@@ -2498,7 +2496,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2498,7 +2496,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
vrh->hena = i40e_pf_get_default_rss_hena(pf); vrh->hena = i40e_pf_get_default_rss_hena(pf);
err: err:
/* send the response back to the VF */ /* send the response back to the VF */
aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
aq_ret, (u8 *)vrh, len); aq_ret, (u8 *)vrh, len);
kfree(vrh); kfree(vrh);
return aq_ret; return aq_ret;
...@@ -2514,8 +2512,8 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2514,8 +2512,8 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
**/ **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{ {
struct i40e_virtchnl_rss_hena *vrh = struct virtchnl_rss_hena *vrh =
(struct i40e_virtchnl_rss_hena *)msg; (struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf; struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
...@@ -2530,170 +2528,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2530,170 +2528,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* send the response to the VF */ /* send the response to the VF */
err: err:
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, aq_ret); return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}
/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
* @msghndl: msg handle
*
* validate msg
**/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
bool err_msg_format = false;
int valid_len = 0;
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
return I40E_ERR_PARAM;
/* Validate message length. */
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(vf))
valid_len = sizeof(u32);
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
break;
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_rxq_info);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_vsi_queue_config_info *vqc =
(struct i40e_virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
sizeof(struct
i40e_virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_irq_map_info *vimi =
(struct i40e_virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
sizeof(struct i40e_virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct i40e_virtchnl_ether_addr_list *veal =
(struct i40e_virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
sizeof(struct i40e_virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
case I40E_VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct i40e_virtchnl_vlan_filter_list *vfl =
(struct i40e_virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct i40e_virtchnl_promisc_info);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
case I40E_VIRTCHNL_OP_IWARP:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
*/
if (msglen)
valid_len = msglen;
else
err_msg_format = true;
break;
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_iwarp_qvlist_info *qv =
(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
if (qv->num_vectors == 0) {
err_msg_format = true;
break;
}
valid_len += ((qv->num_vectors - 1) *
sizeof(struct i40e_virtchnl_iwarp_qv_info));
}
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
valid_len = sizeof(struct i40e_virtchnl_rss_key);
if (msglen >= valid_len) {
struct i40e_virtchnl_rss_key *vrk =
(struct i40e_virtchnl_rss_key *)msg;
if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
err_msg_format = true;
break;
}
valid_len += vrk->key_len - 1;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
valid_len = sizeof(struct i40e_virtchnl_rss_lut);
if (msglen >= valid_len) {
struct i40e_virtchnl_rss_lut *vrl =
(struct i40e_virtchnl_rss_lut *)msg;
if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
err_msg_format = true;
break;
}
valid_len += vrl->lut_entries - 1;
}
break;
case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case I40E_VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct i40e_virtchnl_rss_hena);
break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
return -EPERM;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format)) {
i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
return -EINVAL;
} else {
return 0;
}
} }
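The per-opcode length checks removed above now live in the shared helper from include/linux/avf/virtchnl.h that i40e_vc_process_vf_msg() calls below. As a rough sketch of the pattern only (the example_ name is made up, the real helper returns virtchnl status codes rather than errnos), the generic validator computes an expected payload size per opcode, grows it for trailing flexible arrays, and rejects anything that does not match:

static int example_validate_len(u32 v_opcode, u8 *msg, u16 msglen)
{
	u32 valid_len = 0;

	switch (v_opcode) {
	case VIRTCHNL_OP_RESET_VF:
		break;				/* no payload expected */
	case VIRTCHNL_OP_CONFIG_RSS_KEY: {
		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;

		valid_len = sizeof(*vrk);	/* header plus one key byte */
		if (msglen >= valid_len)
			valid_len += vrk->key_len - 1;
		break;
	}
	default:
		return -EPERM;			/* unknown opcodes are refused */
	}

	return (msglen == valid_len) ? 0 : -EINVAL;
}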
/** /**
...@@ -2719,80 +2554,104 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, ...@@ -2719,80 +2554,104 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
if (local_vf_id >= pf->num_alloc_vfs) if (local_vf_id >= pf->num_alloc_vfs)
return -EINVAL; return -EINVAL;
vf = &(pf->vf[local_vf_id]); vf = &(pf->vf[local_vf_id]);
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
return I40E_ERR_PARAM;
/* perform basic checks on the msg */ /* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
/* perform additional checks specific to this driver */
if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
ret = -EINVAL;
} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
ret = -EINVAL;
}
if (ret) { if (ret) {
i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen); local_vf_id, v_opcode, msglen);
return ret; switch (ret) {
case VIRTCHNL_ERR_PARAM:
return -EPERM;
default:
return -EINVAL;
}
} }
switch (v_opcode) { switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION: case VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf, msg); ret = i40e_vc_get_version_msg(vf, msg);
break; break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg); ret = i40e_vc_get_vf_resources_msg(vf, msg);
break; break;
case I40E_VIRTCHNL_OP_RESET_VF: case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf); i40e_vc_reset_vf_msg(vf);
ret = 0; ret = 0;
break; break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ret = i40e_vc_config_queues_msg(vf, msg, msglen); ret = i40e_vc_config_queues_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES: case VIRTCHNL_OP_ENABLE_QUEUES:
ret = i40e_vc_enable_queues_msg(vf, msg, msglen); ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
i40e_vc_notify_vf_link_state(vf); i40e_vc_notify_vf_link_state(vf);
break; break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES: case VIRTCHNL_OP_DISABLE_QUEUES:
ret = i40e_vc_disable_queues_msg(vf, msg, msglen); ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: case VIRTCHNL_OP_ADD_ETH_ADDR:
ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: case VIRTCHNL_OP_DEL_ETH_ADDR:
ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_ADD_VLAN:
ret = i40e_vc_add_vlan_msg(vf, msg, msglen); ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_DEL_VLAN: case VIRTCHNL_OP_DEL_VLAN:
ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_GET_STATS: case VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen); ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_IWARP: case VIRTCHNL_OP_IWARP:
ret = i40e_vc_iwarp_msg(vf, msg, msglen); ret = i40e_vc_iwarp_msg(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
break; break;
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break; break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: case VIRTCHNL_OP_CONFIG_RSS_KEY:
ret = i40e_vc_config_rss_key(vf, msg, msglen); ret = i40e_vc_config_rss_key(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg, msglen); ret = i40e_vc_config_rss_lut(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
ret = i40e_vc_get_rss_hena(vf, msg, msglen); ret = i40e_vc_get_rss_hena(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_SET_RSS_HENA: case VIRTCHNL_OP_SET_RSS_HENA:
ret = i40e_vc_set_rss_hena(vf, msg, msglen); ret = i40e_vc_set_rss_hena(vf, msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_UNKNOWN: case VIRTCHNL_OP_UNKNOWN:
default: default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, local_vf_id); v_opcode, local_vf_id);
...@@ -3218,7 +3077,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) ...@@ -3218,7 +3077,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back; struct i40e_pf *pf = np->vsi->back;
struct i40e_virtchnl_pf_event pfe; struct virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf; struct i40e_vf *vf;
int abs_vf_id; int abs_vf_id;
...@@ -3234,8 +3093,8 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) ...@@ -3234,8 +3093,8 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf = &pf->vf[vf_id]; vf = &pf->vf[vf_id];
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; pfe.severity = PF_EVENT_SEVERITY_INFO;
switch (link) { switch (link) {
case IFLA_VF_LINK_STATE_AUTO: case IFLA_VF_LINK_STATE_AUTO:
...@@ -3243,6 +3102,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) ...@@ -3243,6 +3102,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
pfe.event_data.link_event.link_status = pfe.event_data.link_event.link_status =
pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed = pfe.event_data.link_event.link_speed =
(enum virtchnl_link_speed)
pf->hw.phy.link_info.link_speed; pf->hw.phy.link_info.link_speed;
break; break;
case IFLA_VF_LINK_STATE_ENABLE: case IFLA_VF_LINK_STATE_ENABLE:
...@@ -3262,7 +3122,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) ...@@ -3262,7 +3122,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
goto error_out; goto error_out;
} }
/* Notify the VF of its new link state */ /* Notify the VF of its new link state */
i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL); 0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out: error_out:
......
...@@ -40,9 +40,6 @@ ...@@ -40,9 +40,6 @@
#define I40E_VLAN_MASK 0xFFF #define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000 #define I40E_PRIORITY_MASK 0x7000
#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
/* Various queue ctrls */ /* Various queue ctrls */
enum i40e_queue_ctrl { enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0, I40E_QUEUE_CTRL_UNKNOWN = 0,
...@@ -81,13 +78,13 @@ struct i40e_vf { ...@@ -81,13 +78,13 @@ struct i40e_vf {
s16 vf_id; s16 vf_id;
/* all VF vsis connect to the same parent */ /* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type; enum i40e_switch_element_types parent_type;
struct i40e_virtchnl_version_info vf_ver; struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */ u32 driver_caps; /* reported by VF driver */
/* VF Port Extender (PE) stag if used */ /* VF Port Extender (PE) stag if used */
u16 stag; u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr; struct virtchnl_ether_addr default_lan_addr;
u16 port_vlan_id; u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */ bool pf_set_mac; /* The VMM admin set the VF MAC address */
bool trusted; bool trusted;
...@@ -115,7 +112,7 @@ struct i40e_vf { ...@@ -115,7 +112,7 @@ struct i40e_vf {
u16 num_vlan; u16 num_vlan;
/* RDMA Client */ /* RDMA Client */
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info; struct virtchnl_iwarp_qvlist_info *qvlist_info;
}; };
void i40e_free_vfs(struct i40e_pf *pf); void i40e_free_vfs(struct i40e_pf *pf);
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include "i40e_type.h" #include "i40e_type.h"
#include "i40e_adminq.h" #include "i40e_adminq.h"
#include "i40e_prototype.h" #include "i40e_prototype.h"
#include "i40e_virtchnl.h" #include <linux/avf/virtchnl.h>
/** /**
* i40e_set_mac_type - Sets MAC type * i40e_set_mac_type - Sets MAC type
...@@ -68,6 +68,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) ...@@ -68,6 +68,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
break; break;
case I40E_DEV_ID_VF: case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV: case I40E_DEV_ID_VF_HV:
case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF; hw->mac.type = I40E_MAC_VF;
break; break;
default: default:
...@@ -1054,7 +1055,7 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) ...@@ -1054,7 +1055,7 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
* completion before returning. * completion before returning.
**/ **/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode, enum virtchnl_ops v_opcode,
i40e_status v_retval, i40e_status v_retval,
u8 *msg, u16 msglen, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details) struct i40e_asq_cmd_details *cmd_details)
...@@ -1092,9 +1093,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, ...@@ -1092,9 +1093,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information. * with appropriate information.
**/ **/
void i40e_vf_parse_hw_config(struct i40e_hw *hw, void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg) struct virtchnl_vf_resource *msg)
{ {
struct i40e_virtchnl_vsi_resource *vsi_res; struct virtchnl_vsi_resource *vsi_res;
int i; int i;
vsi_res = &msg->vsi_res[0]; vsi_res = &msg->vsi_res[0];
...@@ -1104,10 +1105,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, ...@@ -1104,10 +1105,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags & hw->dev_caps.dcb = msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_L2; VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.fcoe = 0; hw->dev_caps.fcoe = 0;
for (i = 0; i < msg->num_vsis; i++) { for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) { if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
ether_addr_copy(hw->mac.perm_addr, ether_addr_copy(hw->mac.perm_addr,
vsi_res->default_mac_addr); vsi_res->default_mac_addr);
ether_addr_copy(hw->mac.addr, ether_addr_copy(hw->mac.addr,
...@@ -1127,7 +1128,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, ...@@ -1127,7 +1128,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/ **/
i40e_status i40e_vf_reset(struct i40e_hw *hw) i40e_status i40e_vf_reset(struct i40e_hw *hw)
{ {
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL); 0, NULL, 0, NULL);
} }
......
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#define I40E_DEV_ID_25G_SFP28 0x158B #define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571 #define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include "i40e_type.h" #include "i40e_type.h"
#include "i40e_alloc.h" #include "i40e_alloc.h"
#include "i40e_virtchnl.h" #include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in /* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are * the standard function pointer structures. These are
...@@ -87,10 +87,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) ...@@ -87,10 +87,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
/* i40e_common for VF drivers*/ /* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw, void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg); struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw); i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode, enum virtchnl_ops v_opcode,
i40e_status v_retval, i40e_status v_retval,
u8 *msg, u16 msglen, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details); struct i40e_asq_cmd_details *cmd_details);
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
#include <net/udp.h> #include <net/udp.h>
#include "i40e_type.h" #include "i40e_type.h"
#include "i40e_virtchnl.h" #include <linux/avf/virtchnl.h>
#include "i40e_txrx.h" #include "i40e_txrx.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
...@@ -263,26 +263,26 @@ struct i40evf_adapter { ...@@ -263,26 +263,26 @@ struct i40evf_adapter {
struct work_struct watchdog_task; struct work_struct watchdog_task;
bool netdev_registered; bool netdev_registered;
bool link_up; bool link_up;
enum i40e_aq_link_speed link_speed; enum virtchnl_link_speed link_speed;
enum i40e_virtchnl_ops current_op; enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \ (_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ VIRTCHNL_VF_OFFLOAD_IWARP : \
0) 0)
#define CLIENT_ENABLED(_a) ((_a)->cinst) #define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */ /* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ #define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) VIRTCHNL_VF_OFFLOAD_RSS_AQ)
#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \ #define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
(I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF))) VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN) VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct i40e_virtchnl_version_info pf_version; struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1)) ((_a)->pf_version.minor == 1))
u16 msg_enable; u16 msg_enable;
...@@ -348,7 +348,7 @@ void i40evf_set_hena(struct i40evf_adapter *adapter); ...@@ -348,7 +348,7 @@ void i40evf_set_hena(struct i40evf_adapter *adapter);
void i40evf_set_rss_key(struct i40evf_adapter *adapter); void i40evf_set_rss_key(struct i40evf_adapter *adapter);
void i40evf_set_rss_lut(struct i40evf_adapter *adapter); void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode, enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen); i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter); int i40evf_config_rss(struct i40evf_adapter *adapter);
int i40evf_lan_add_device(struct i40evf_adapter *adapter); int i40evf_lan_add_device(struct i40evf_adapter *adapter);
......
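The CLIENT_ALLOWED/RSS_*/VLAN_ALLOWED macros above all reduce to testing a bit in the offload flags the PF returned for VIRTCHNL_OP_GET_VF_RESOURCES. A minimal usage sketch, assuming the renamed flags from linux/avf/virtchnl.h (the example_ helper is hypothetical):

static bool example_vf_supports(struct i40evf_adapter *adapter, u32 flag)
{
	/* vf_res is only valid once the PF has answered GET_VF_RESOURCES */
	return adapter->vf_res &&
	       (adapter->vf_res->vf_offload_flags & flag);
}

/* e.g. gate VLAN offloads the same way the VLAN_ALLOWED() macro does:
 *	if (example_vf_supports(adapter, VIRTCHNL_VF_OFFLOAD_VLAN))
 *		...
 */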
...@@ -120,7 +120,7 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev) ...@@ -120,7 +120,7 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev)
return -EAGAIN; return -EAGAIN;
err = i40e_aq_send_msg_to_pf(&adapter->hw, err = i40e_aq_send_msg_to_pf(&adapter->hw,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
I40E_SUCCESS, NULL, 0, NULL); I40E_SUCCESS, NULL, 0, NULL);
if (err) if (err)
...@@ -410,7 +410,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, ...@@ -410,7 +410,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
if (adapter->aq_required) if (adapter->aq_required)
return -EAGAIN; return -EAGAIN;
err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP, err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
I40E_SUCCESS, msg, len, NULL); I40E_SUCCESS, msg, len, NULL);
if (err) if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
...@@ -431,7 +431,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, ...@@ -431,7 +431,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client, struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info) struct i40e_qvlist_info *qvlist_info)
{ {
struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info; struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct i40evf_adapter *adapter = ldev->vf; struct i40evf_adapter *adapter = ldev->vf;
struct i40e_qv_info *qv_info; struct i40e_qv_info *qv_info;
i40e_status err; i40e_status err;
...@@ -453,14 +453,14 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, ...@@ -453,14 +453,14 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
return -EINVAL; return -EINVAL;
} }
v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info; v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
(sizeof(struct i40e_virtchnl_iwarp_qv_info) * (sizeof(struct virtchnl_iwarp_qv_info) *
(v_qvlist_info->num_vectors - 1)); (v_qvlist_info->num_vectors - 1));
adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
err = i40e_aq_send_msg_to_pf(&adapter->hw, err = i40e_aq_send_msg_to_pf(&adapter->hw,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
if (err) { if (err) {
...@@ -474,7 +474,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev, ...@@ -474,7 +474,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
for (i = 0; i < 5; i++) { for (i = 0; i < 5; i++) {
msleep(100); msleep(100);
if (!(adapter->client_pending & if (!(adapter->client_pending &
BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
err = 0; err = 0;
break; break;
} }
......
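The message sizing in i40evf_client_setup_qvlist() above follows the usual virtchnl convention: structures that carry a variable number of elements end in a one-element array, so a message with n elements needs (n - 1) extra slots. A hedged helper sketch (the name is illustrative only):

static u32 example_qvlist_msg_size(u32 num_vectors)
{
	/* virtchnl_iwarp_qvlist_info already contains one qv_info slot */
	return sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (num_vectors - 1) * sizeof(struct virtchnl_iwarp_qv_info);
}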
...@@ -44,9 +44,9 @@ static const char i40evf_driver_string[] = ...@@ -44,9 +44,9 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k" #define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2 #define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 1 #define DRV_VERSION_MINOR 0
#define DRV_VERSION_BUILD 14 #define DRV_VERSION_BUILD 0
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \ __stringify(DRV_VERSION_BUILD) \
...@@ -67,6 +67,7 @@ static const struct pci_device_id i40evf_pci_tbl[] = { ...@@ -67,6 +67,7 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
/* required last entry */ /* required last entry */
{0, } {0, }
}; };
...@@ -1131,7 +1132,7 @@ void i40evf_down(struct i40evf_adapter *adapter) ...@@ -1131,7 +1132,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) { adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */ /* cancel any current operation */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait /* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running * here for this to complete. The watchdog is still running
* and it will take care of this. * and it will take care of this.
...@@ -1311,7 +1312,7 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) ...@@ -1311,7 +1312,7 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
int ret = 0; int ret = 0;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
adapter->current_op); adapter->current_op);
...@@ -1410,7 +1411,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter) ...@@ -1410,7 +1411,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
if (!RSS_PF(adapter)) { if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_offload_flags & if (adapter->vf_res->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
else else
adapter->hena = I40E_DEFAULT_RSS_HENA; adapter->hena = I40E_DEFAULT_RSS_HENA;
...@@ -1588,8 +1589,8 @@ static void i40evf_watchdog_task(struct work_struct *work) ...@@ -1588,8 +1589,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
reg_val = rd32(hw, I40E_VFGEN_RSTAT) & reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK; I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((reg_val == I40E_VFR_VFACTIVE) || if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
(reg_val == I40E_VFR_COMPLETED)) { (reg_val == VIRTCHNL_VFR_COMPLETED)) {
/* A chance for redemption! */ /* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP; adapter->state = __I40EVF_STARTUP;
...@@ -1605,7 +1606,7 @@ static void i40evf_watchdog_task(struct work_struct *work) ...@@ -1605,7 +1606,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
return; return;
} }
adapter->aq_required = 0; adapter->aq_required = 0;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done; goto watchdog_done;
} }
...@@ -1621,7 +1622,7 @@ static void i40evf_watchdog_task(struct work_struct *work) ...@@ -1621,7 +1622,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task); schedule_work(&adapter->reset_task);
adapter->aq_required = 0; adapter->aq_required = 0;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done; goto watchdog_done;
} }
...@@ -1707,13 +1708,13 @@ static void i40evf_watchdog_task(struct work_struct *work) ...@@ -1707,13 +1708,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
} }
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC | i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
I40E_FLAG_VF_MULTICAST_PROMISC); FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done; goto watchdog_done;
} }
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC); i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done; goto watchdog_done;
} }
...@@ -1854,7 +1855,7 @@ static void i40evf_reset_task(struct work_struct *work) ...@@ -1854,7 +1855,7 @@ static void i40evf_reset_task(struct work_struct *work)
reg_val = rd32(hw, I40E_VFGEN_RSTAT) & reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK; I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE) if (reg_val == VIRTCHNL_VFR_VFACTIVE)
break; break;
} }
...@@ -1888,7 +1889,7 @@ static void i40evf_reset_task(struct work_struct *work) ...@@ -1888,7 +1889,7 @@ static void i40evf_reset_task(struct work_struct *work)
/* kill and reinit the admin queue */ /* kill and reinit the admin queue */
i40evf_shutdown_adminq(hw); i40evf_shutdown_adminq(hw);
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw); err = i40evf_init_adminq(hw);
if (err) if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
...@@ -1949,7 +1950,7 @@ static void i40evf_adminq_task(struct work_struct *work) ...@@ -1949,7 +1950,7 @@ static void i40evf_adminq_task(struct work_struct *work)
container_of(work, struct i40evf_adapter, adminq_task); container_of(work, struct i40evf_adapter, adminq_task);
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event; struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg; struct virtchnl_msg *v_msg;
i40e_status ret; i40e_status ret;
u32 val, oldval; u32 val, oldval;
u16 pending; u16 pending;
...@@ -1962,14 +1963,15 @@ static void i40evf_adminq_task(struct work_struct *work) ...@@ -1962,14 +1963,15 @@ static void i40evf_adminq_task(struct work_struct *work)
if (!event.msg_buf) if (!event.msg_buf)
goto out; goto out;
v_msg = (struct i40e_virtchnl_msg *)&event.desc; v_msg = (struct virtchnl_msg *)&event.desc;
do { do {
ret = i40evf_clean_arq_element(hw, &event, &pending); ret = i40evf_clean_arq_element(hw, &event, &pending);
if (ret || !v_msg->v_opcode) if (ret || !v_msg->v_opcode)
break; /* No event to process or error cleaning ARQ */ break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode, i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
v_msg->v_retval, event.msg_buf, (i40e_status)v_msg->v_retval,
event.msg_buf,
event.msg_len); event.msg_len);
if (pending != 0) if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
...@@ -2347,7 +2349,7 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev, ...@@ -2347,7 +2349,7 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40evf_adapter *adapter = netdev_priv(netdev);
features &= ~I40EVF_VLAN_FEATURES; features &= ~I40EVF_VLAN_FEATURES;
if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
features |= I40EVF_VLAN_FEATURES; features |= I40EVF_VLAN_FEATURES;
return features; return features;
} }
...@@ -2384,8 +2386,8 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) ...@@ -2384,8 +2386,8 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
for (i = 0; i < 100; i++) { for (i = 0; i < 100; i++) {
rstat = rd32(hw, I40E_VFGEN_RSTAT) & rstat = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK; I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((rstat == I40E_VFR_VFACTIVE) || if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
(rstat == I40E_VFR_COMPLETED)) (rstat == VIRTCHNL_VFR_COMPLETED))
return 0; return 0;
usleep_range(10, 20); usleep_range(10, 20);
} }
...@@ -2401,7 +2403,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) ...@@ -2401,7 +2403,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/ **/
int i40evf_process_config(struct i40evf_adapter *adapter) int i40evf_process_config(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res; struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi; struct i40e_vsi *vsi = &adapter->vsi;
int i; int i;
...@@ -2410,7 +2412,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ...@@ -2410,7 +2412,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
/* got VF config message back from PF, now we can parse it */ /* got VF config message back from PF, now we can parse it */
for (i = 0; i < vfres->num_vsis; i++) { for (i = 0; i < vfres->num_vsis; i++) {
if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV) if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
adapter->vsi_res = &vfres->vsi_res[i]; adapter->vsi_res = &vfres->vsi_res[i];
} }
if (!adapter->vsi_res) { if (!adapter->vsi_res) {
...@@ -2434,7 +2436,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ...@@ -2434,7 +2436,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
/* advertise to stack only if offloads for encapsulated packets is /* advertise to stack only if offloads for encapsulated packets is
* supported * supported
*/ */
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) { if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_GRE_CSUM |
...@@ -2445,7 +2447,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ...@@ -2445,7 +2447,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
0; 0;
if (!(vfres->vf_offload_flags & if (!(vfres->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
netdev->gso_partial_features |= netdev->gso_partial_features |=
NETIF_F_GSO_UDP_TUNNEL_CSUM; NETIF_F_GSO_UDP_TUNNEL_CSUM;
...@@ -2472,7 +2474,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ...@@ -2472,7 +2474,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev; vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle; vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
adapter->rss_key_size = vfres->rss_key_size; adapter->rss_key_size = vfres->rss_key_size;
adapter->rss_lut_size = vfres->rss_lut_size; adapter->rss_lut_size = vfres->rss_lut_size;
} else { } else {
...@@ -2558,8 +2560,8 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2558,8 +2560,8 @@ static void i40evf_init_task(struct work_struct *work)
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
adapter->pf_version.major, adapter->pf_version.major,
adapter->pf_version.minor, adapter->pf_version.minor,
I40E_VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MAJOR,
I40E_VIRTCHNL_VERSION_MINOR); VIRTCHNL_VERSION_MINOR);
goto err; goto err;
} }
err = i40evf_send_vf_config_msg(adapter); err = i40evf_send_vf_config_msg(adapter);
...@@ -2573,9 +2575,9 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2573,9 +2575,9 @@ static void i40evf_init_task(struct work_struct *work)
case __I40EVF_INIT_GET_RESOURCES: case __I40EVF_INIT_GET_RESOURCES:
/* aq msg sent, awaiting reply */ /* aq msg sent, awaiting reply */
if (!adapter->vf_res) { if (!adapter->vf_res) {
bufsz = sizeof(struct i40e_virtchnl_vf_resource) + bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI * (I40E_MAX_VF_VSI *
sizeof(struct i40e_virtchnl_vsi_resource)); sizeof(struct virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
if (!adapter->vf_res) if (!adapter->vf_res)
goto err; goto err;
...@@ -2606,7 +2608,7 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2606,7 +2608,7 @@ static void i40evf_init_task(struct work_struct *work)
if (i40evf_process_config(adapter)) if (i40evf_process_config(adapter))
goto err_alloc; goto err_alloc;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
...@@ -2644,7 +2646,7 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2644,7 +2646,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init; goto err_sw_init;
i40evf_map_rings_to_vectors(adapter); i40evf_map_rings_to_vectors(adapter);
if (adapter->vf_res->vf_offload_flags & if (adapter->vf_res->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
err = i40evf_request_misc_irq(adapter); err = i40evf_request_misc_irq(adapter);
......
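For orientation, the init-task changes above keep the usual virtchnl bring-up order. A condensed sketch of that sequence, with error handling trimmed and the state machine, waits, and retries of the real init task collapsed into straight-line calls (function names are the ones used in this driver):

static int example_vf_bringup(struct i40evf_adapter *adapter)
{
	int err;

	/* 1. negotiate the virtchnl version with the PF */
	err = i40evf_send_api_ver(adapter);
	if (!err)
		err = i40evf_verify_api_ver(adapter);

	/* 2. request resources, advertising the VF's offload capabilities;
	 * assumes adapter->vf_res was allocated as in __I40EVF_INIT_GET_RESOURCES
	 */
	if (!err)
		err = i40evf_send_vf_config_msg(adapter);
	if (!err)
		err = i40evf_get_vf_config(adapter);

	/* 3. turn the reply into netdev features, RSS sizes, VSI handle, ... */
	if (!err)
		err = i40evf_process_config(adapter);

	return err;
}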
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
* Send message to PF and print status if failure. * Send message to PF and print status if failure.
**/ **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops op, u8 *msg, u16 len) enum virtchnl_ops op, u8 *msg, u16 len)
{ {
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
i40e_status err; i40e_status err;
...@@ -68,12 +68,12 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, ...@@ -68,12 +68,12 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
**/ **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter) int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_version_info vvi; struct virtchnl_version_info vvi;
vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; vvi.major = VIRTCHNL_VERSION_MAJOR;
vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; vvi.minor = VIRTCHNL_VERSION_MINOR;
return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
sizeof(vvi)); sizeof(vvi));
} }
...@@ -88,10 +88,10 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter) ...@@ -88,10 +88,10 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter)
**/ **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter) int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_version_info *pf_vvi; struct virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event; struct i40e_arq_event_info event;
enum i40e_virtchnl_ops op; enum virtchnl_ops op;
i40e_status err; i40e_status err;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
...@@ -109,8 +109,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) ...@@ -109,8 +109,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
if (err) if (err)
goto out_alloc; goto out_alloc;
op = op =
(enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
if (op == I40E_VIRTCHNL_OP_VERSION) if (op == VIRTCHNL_OP_VERSION)
break; break;
} }
...@@ -119,19 +119,19 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) ...@@ -119,19 +119,19 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
if (err) if (err)
goto out_alloc; goto out_alloc;
if (op != I40E_VIRTCHNL_OP_VERSION) { if (op != VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
op); op);
err = -EIO; err = -EIO;
goto out_alloc; goto out_alloc;
} }
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
adapter->pf_version = *pf_vvi; adapter->pf_version = *pf_vvi;
if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
(pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
err = -EIO; err = -EIO;
out_alloc: out_alloc:
...@@ -152,25 +152,25 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) ...@@ -152,25 +152,25 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{ {
u32 caps; u32 caps;
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | caps = VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | VIRTCHNL_VF_OFFLOAD_RSS_AQ |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | VIRTCHNL_VF_OFFLOAD_VLAN |
I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
I40E_VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_ENCAP |
I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter)) if (PF_IS_V11(adapter))
return i40evf_send_pf_msg(adapter, return i40evf_send_pf_msg(adapter,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES, VIRTCHNL_OP_GET_VF_RESOURCES,
(u8 *)&caps, sizeof(caps)); (u8 *)&caps, sizeof(caps));
else else
return i40evf_send_pf_msg(adapter, return i40evf_send_pf_msg(adapter,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES, VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0); NULL, 0);
} }
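The PF_IS_V11() test above decides whether the capability bitmap is sent at all: a virtchnl 1.0 PF is sent a zero-length GET_VF_RESOURCES request and the VF simply takes whatever the PF reports. The same major/minor comparison drives i40evf_verify_api_ver() earlier in this file; as a standalone sketch of that rule (illustrative helper name):

static bool example_pf_version_ok(const struct virtchnl_version_info *pf_ver)
{
	/* refuse to run against a PF speaking a newer virtchnl than we do */
	if (pf_ver->major > VIRTCHNL_VERSION_MAJOR)
		return false;
	if (pf_ver->major == VIRTCHNL_VERSION_MAJOR &&
	    pf_ver->minor > VIRTCHNL_VERSION_MINOR)
		return false;
	return true;
}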
...@@ -188,12 +188,12 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) ...@@ -188,12 +188,12 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{ {
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event; struct i40e_arq_event_info event;
enum i40e_virtchnl_ops op; enum virtchnl_ops op;
i40e_status err; i40e_status err;
u16 len; u16 len;
len = sizeof(struct i40e_virtchnl_vf_resource) + len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len; event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) { if (!event.msg_buf) {
...@@ -209,8 +209,8 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) ...@@ -209,8 +209,8 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
if (err) if (err)
goto out_alloc; goto out_alloc;
op = op =
(enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
break; break;
} }
...@@ -232,20 +232,20 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) ...@@ -232,20 +232,20 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
**/ **/
void i40evf_configure_queues(struct i40evf_adapter *adapter) void i40evf_configure_queues(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_vsi_queue_config_info *vqci; struct virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi; struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues; int pairs = adapter->num_active_queues;
int i, len, max_frame = I40E_MAX_RXBUFFER; int i, len, max_frame = I40E_MAX_RXBUFFER;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + len = sizeof(struct virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); (sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_KERNEL); vqci = kzalloc(len, GFP_KERNEL);
if (!vqci) if (!vqci)
return; return;
...@@ -278,7 +278,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) ...@@ -278,7 +278,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
} }
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len); (u8 *)vqci, len);
kfree(vqci); kfree(vqci);
} }
...@@ -291,20 +291,20 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) ...@@ -291,20 +291,20 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
**/ **/
void i40evf_enable_queues(struct i40evf_adapter *adapter) void i40evf_enable_queues(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_queue_select vqs; struct virtchnl_queue_select vqs;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues; vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs)); (u8 *)&vqs, sizeof(vqs));
} }
...@@ -316,20 +316,20 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) ...@@ -316,20 +316,20 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
**/ **/
void i40evf_disable_queues(struct i40evf_adapter *adapter) void i40evf_disable_queues(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_queue_select vqs; struct virtchnl_queue_select vqs;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues; vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs)); (u8 *)&vqs, sizeof(vqs));
} }
...@@ -342,23 +342,23 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) ...@@ -342,23 +342,23 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
**/ **/
void i40evf_map_queues(struct i40evf_adapter *adapter) void i40evf_map_queues(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_irq_map_info *vimi; struct virtchnl_irq_map_info *vimi;
int v_idx, q_vectors, len; int v_idx, q_vectors, len;
struct i40e_q_vector *q_vector; struct i40e_q_vector *q_vector;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
q_vectors = adapter->num_msix_vectors - NONQ_VECS; q_vectors = adapter->num_msix_vectors - NONQ_VECS;
len = sizeof(struct i40e_virtchnl_irq_map_info) + len = sizeof(struct virtchnl_irq_map_info) +
(adapter->num_msix_vectors * (adapter->num_msix_vectors *
sizeof(struct i40e_virtchnl_vector_map)); sizeof(struct virtchnl_vector_map));
vimi = kzalloc(len, GFP_KERNEL); vimi = kzalloc(len, GFP_KERNEL);
if (!vimi) if (!vimi)
return; return;
...@@ -379,7 +379,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) ...@@ -379,7 +379,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vimi->vecmap[v_idx].rxq_map = 0; vimi->vecmap[v_idx].rxq_map = 0;
adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vimi, len); (u8 *)vimi, len);
kfree(vimi); kfree(vimi);
} }
...@@ -394,12 +394,12 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) ...@@ -394,12 +394,12 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
**/ **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_ether_addr_list *veal; struct virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0; int len, i = 0, count = 0;
struct i40evf_mac_filter *f; struct i40evf_mac_filter *f;
bool more = false; bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
adapter->current_op); adapter->current_op);
...@@ -413,17 +413,17 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) ...@@ -413,17 +413,17 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
len = sizeof(struct i40e_virtchnl_ether_addr_list) + len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr)); (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) { if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE - count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr); sizeof(struct virtchnl_ether_addr);
len = sizeof(struct i40e_virtchnl_ether_addr_list) + len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr)); (count * sizeof(struct virtchnl_ether_addr));
more = true; more = true;
} }
...@@ -444,7 +444,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) ...@@ -444,7 +444,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
} }
if (!more) if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len); (u8 *)veal, len);
kfree(veal); kfree(veal);
} }
...@@ -459,12 +459,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) ...@@ -459,12 +459,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
**/ **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_ether_addr_list *veal; struct virtchnl_ether_addr_list *veal;
struct i40evf_mac_filter *f, *ftmp; struct i40evf_mac_filter *f, *ftmp;
int len, i = 0, count = 0; int len, i = 0, count = 0;
bool more = false; bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
adapter->current_op); adapter->current_op);
...@@ -478,17 +478,17 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) ...@@ -478,17 +478,17 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
len = sizeof(struct i40e_virtchnl_ether_addr_list) + len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr)); (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) { if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE - count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr); sizeof(struct virtchnl_ether_addr);
len = sizeof(struct i40e_virtchnl_ether_addr_list) + len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr)); (count * sizeof(struct virtchnl_ether_addr));
more = true; more = true;
} }
veal = kzalloc(len, GFP_KERNEL); veal = kzalloc(len, GFP_KERNEL);
...@@ -509,7 +509,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) ...@@ -509,7 +509,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
} }
if (!more) if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len); (u8 *)veal, len);
kfree(veal); kfree(veal);
} }
...@@ -524,12 +524,12 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) ...@@ -524,12 +524,12 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
**/ **/
void i40evf_add_vlans(struct i40evf_adapter *adapter) void i40evf_add_vlans(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_vlan_filter_list *vvfl; struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0; int len, i = 0, count = 0;
struct i40evf_vlan_filter *f; struct i40evf_vlan_filter *f;
bool more = false; bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
adapter->current_op); adapter->current_op);
...@@ -544,16 +544,16 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) ...@@ -544,16 +544,16 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
len = sizeof(struct i40e_virtchnl_vlan_filter_list) + len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16)); (count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) { if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE - count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16); sizeof(u16);
len = sizeof(struct i40e_virtchnl_vlan_filter_list) + len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16)); (count * sizeof(u16));
more = true; more = true;
} }
...@@ -574,7 +574,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) ...@@ -574,7 +574,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
} }
if (!more) if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl); kfree(vvfl);
} }
...@@ -588,12 +588,12 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) ...@@ -588,12 +588,12 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
**/ **/
void i40evf_del_vlans(struct i40evf_adapter *adapter) void i40evf_del_vlans(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_vlan_filter_list *vvfl; struct virtchnl_vlan_filter_list *vvfl;
struct i40evf_vlan_filter *f, *ftmp; struct i40evf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0; int len, i = 0, count = 0;
bool more = false; bool more = false;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
adapter->current_op); adapter->current_op);
...@@ -608,16 +608,16 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) ...@@ -608,16 +608,16 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
len = sizeof(struct i40e_virtchnl_vlan_filter_list) + len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16)); (count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) { if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE - count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16); sizeof(u16);
len = sizeof(struct i40e_virtchnl_vlan_filter_list) + len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16)); (count * sizeof(u16));
more = true; more = true;
} }
...@@ -639,7 +639,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) ...@@ -639,7 +639,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
} }
if (!more) if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl); kfree(vvfl);
} }
...@@ -652,25 +652,25 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) ...@@ -652,25 +652,25 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
**/ **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{ {
struct i40e_virtchnl_promisc_info vpi; struct virtchnl_promisc_info vpi;
int promisc_all; int promisc_all;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
promisc_all = I40E_FLAG_VF_UNICAST_PROMISC | promisc_all = FLAG_VF_UNICAST_PROMISC |
I40E_FLAG_VF_MULTICAST_PROMISC; FLAG_VF_MULTICAST_PROMISC;
if ((flags & promisc_all) == promisc_all) { if ((flags & promisc_all) == promisc_all) {
adapter->flags |= I40EVF_FLAG_PROMISC_ON; adapter->flags |= I40EVF_FLAG_PROMISC_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
} }
if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) { if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
...@@ -682,10 +682,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) ...@@ -682,10 +682,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
} }
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags; vpi.flags = flags;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
(u8 *)&vpi, sizeof(vpi)); (u8 *)&vpi, sizeof(vpi));
} }
...@@ -697,19 +697,19 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) ...@@ -697,19 +697,19 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
**/ **/
void i40evf_request_stats(struct i40evf_adapter *adapter) void i40evf_request_stats(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_queue_select vqs; struct virtchnl_queue_select vqs;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */ /* no error message, this isn't crucial */
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */ /* queue maps are ignored for this message - only the vsi is used */
if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS, if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs))) (u8 *)&vqs, sizeof(vqs)))
/* if the request failed, don't lock out others */ /* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
} }
/** /**
...@@ -720,15 +720,15 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) ...@@ -720,15 +720,15 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
**/ **/
void i40evf_get_hena(struct i40evf_adapter *adapter) void i40evf_get_hena(struct i40evf_adapter *adapter)
{ {
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS; adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
NULL, 0); NULL, 0);
} }
...@@ -740,18 +740,18 @@ void i40evf_get_hena(struct i40evf_adapter *adapter) ...@@ -740,18 +740,18 @@ void i40evf_get_hena(struct i40evf_adapter *adapter)
**/ **/
void i40evf_set_hena(struct i40evf_adapter *adapter) void i40evf_set_hena(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_rss_hena vrh; struct virtchnl_rss_hena vrh;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
vrh.hena = adapter->hena; vrh.hena = adapter->hena;
adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA; adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&vrh, sizeof(vrh)); (u8 *)&vrh, sizeof(vrh));
} }
...@@ -763,16 +763,16 @@ void i40evf_set_hena(struct i40evf_adapter *adapter) ...@@ -763,16 +763,16 @@ void i40evf_set_hena(struct i40evf_adapter *adapter)
**/ **/
void i40evf_set_rss_key(struct i40evf_adapter *adapter) void i40evf_set_rss_key(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_rss_key *vrk; struct virtchnl_rss_key *vrk;
int len; int len;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
len = sizeof(struct i40e_virtchnl_rss_key) + len = sizeof(struct virtchnl_rss_key) +
(adapter->rss_key_size * sizeof(u8)) - 1; (adapter->rss_key_size * sizeof(u8)) - 1;
vrk = kzalloc(len, GFP_KERNEL); vrk = kzalloc(len, GFP_KERNEL);
if (!vrk) if (!vrk)
...@@ -781,9 +781,9 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) ...@@ -781,9 +781,9 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
vrk->key_len = adapter->rss_key_size; vrk->key_len = adapter->rss_key_size;
memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY; adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
(u8 *)vrk, len); (u8 *)vrk, len);
kfree(vrk); kfree(vrk);
} }
...@@ -796,16 +796,16 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) ...@@ -796,16 +796,16 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
**/ **/
void i40evf_set_rss_lut(struct i40evf_adapter *adapter) void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
{ {
struct i40e_virtchnl_rss_lut *vrl; struct virtchnl_rss_lut *vrl;
int len; int len;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return;
} }
len = sizeof(struct i40e_virtchnl_rss_lut) + len = sizeof(struct virtchnl_rss_lut) +
(adapter->rss_lut_size * sizeof(u8)) - 1; (adapter->rss_lut_size * sizeof(u8)) - 1;
vrl = kzalloc(len, GFP_KERNEL); vrl = kzalloc(len, GFP_KERNEL);
if (!vrl) if (!vrl)
...@@ -813,9 +813,9 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) ...@@ -813,9 +813,9 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
vrl->vsi_id = adapter->vsi.id; vrl->vsi_id = adapter->vsi.id;
vrl->lut_entries = adapter->rss_lut_size; vrl->lut_entries = adapter->rss_lut_size;
memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT; adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
(u8 *)vrl, len); (u8 *)vrl, len);
kfree(vrl); kfree(vrl);
} }
...@@ -871,8 +871,8 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter) ...@@ -871,8 +871,8 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
void i40evf_request_reset(struct i40evf_adapter *adapter) void i40evf_request_reset(struct i40evf_adapter *adapter)
{ {
/* Don't check CURRENT_OP - this is always higher priority */ /* Don't check CURRENT_OP - this is always higher priority */
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
} }
/** /**
...@@ -888,17 +888,17 @@ void i40evf_request_reset(struct i40evf_adapter *adapter) ...@@ -888,17 +888,17 @@ void i40evf_request_reset(struct i40evf_adapter *adapter)
* This function handles the reply messages. * This function handles the reply messages.
**/ **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode, enum virtchnl_ops v_opcode,
i40e_status v_retval, i40e_status v_retval,
u8 *msg, u16 msglen) u8 *msg, u16 msglen)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { if (v_opcode == VIRTCHNL_OP_EVENT) {
struct i40e_virtchnl_pf_event *vpe = struct virtchnl_pf_event *vpe =
(struct i40e_virtchnl_pf_event *)msg; (struct virtchnl_pf_event *)msg;
switch (vpe->event) { switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE: case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed = adapter->link_speed =
vpe->event_data.link_event.link_speed; vpe->event_data.link_event.link_speed;
if (adapter->link_up != if (adapter->link_up !=
...@@ -915,7 +915,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -915,7 +915,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_print_link_message(adapter); i40evf_print_link_message(adapter);
} }
break; break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n"); dev_info(&adapter->pdev->dev, "PF reset warning received\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_PENDING; adapter->flags |= I40EVF_FLAG_RESET_PENDING;
...@@ -932,19 +932,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -932,19 +932,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
} }
if (v_retval) { if (v_retval) {
switch (v_opcode) { switch (v_opcode) {
case I40E_VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_ADD_VLAN:
dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval)); i40evf_stat_str(&adapter->hw, v_retval));
break; break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval)); i40evf_stat_str(&adapter->hw, v_retval));
break; break;
case I40E_VIRTCHNL_OP_DEL_VLAN: case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval)); i40evf_stat_str(&adapter->hw, v_retval));
break; break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: case VIRTCHNL_OP_DEL_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval)); i40evf_stat_str(&adapter->hw, v_retval));
break; break;
...@@ -956,7 +956,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -956,7 +956,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
} }
} }
switch (v_opcode) { switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS: { case VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats = struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg; (struct i40e_eth_stats *)msg;
netdev->stats.rx_packets = stats->rx_unicast + netdev->stats.rx_packets = stats->rx_unicast +
...@@ -973,10 +973,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -973,10 +973,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_stats = *stats; adapter->current_stats = *stats;
} }
break; break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: { case VIRTCHNL_OP_GET_VF_RESOURCES: {
u16 len = sizeof(struct i40e_virtchnl_vf_resource) + u16 len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI * I40E_MAX_VF_VSI *
sizeof(struct i40e_virtchnl_vsi_resource); sizeof(struct virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len)); memcpy(adapter->vf_res, msg, min(msglen, len));
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
/* restore current mac address */ /* restore current mac address */
...@@ -984,18 +984,18 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -984,18 +984,18 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_process_config(adapter); i40evf_process_config(adapter);
} }
break; break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES: case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */ /* enable transmits */
i40evf_irq_enable(adapter, true); i40evf_irq_enable(adapter, true);
break; break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES: case VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter); i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter); i40evf_free_all_rx_resources(adapter);
if (adapter->state == __I40EVF_DOWN_PENDING) if (adapter->state == __I40EVF_DOWN_PENDING)
adapter->state = __I40EVF_DOWN; adapter->state = __I40EVF_DOWN;
break; break;
case I40E_VIRTCHNL_OP_VERSION: case VIRTCHNL_OP_VERSION:
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: case VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence. /* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and * If the firmware needed to get kicked, we'll get these and
* it's no problem. * it's no problem.
...@@ -1003,7 +1003,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -1003,7 +1003,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op) if (v_opcode != adapter->current_op)
return; return;
break; break;
case I40E_VIRTCHNL_OP_IWARP: case VIRTCHNL_OP_IWARP:
/* Gobble zero-length replies from the PF. They indicate that /* Gobble zero-length replies from the PF. They indicate that
* a previous message was received OK, and the client doesn't * a previous message was received OK, and the client doesn't
* care about that. * care about that.
...@@ -1013,13 +1013,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -1013,13 +1013,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
msg, msglen); msg, msglen);
break; break;
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
adapter->client_pending &= adapter->client_pending &=
~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
break; break;
case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct i40e_virtchnl_rss_hena *vrh = struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
(struct i40e_virtchnl_rss_hena *)msg;
if (msglen == sizeof(*vrh)) if (msglen == sizeof(*vrh))
adapter->hena = vrh->hena; adapter->hena = vrh->hena;
else else
...@@ -1033,5 +1032,5 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ...@@ -1033,5 +1032,5 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_op, v_opcode); adapter->current_op, v_opcode);
break; break;
} /* switch v_opcode */ } /* switch v_opcode */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = VIRTCHNL_OP_UNKNOWN;
} }
...@@ -24,87 +24,141 @@ ...@@ -24,87 +24,141 @@
* *
******************************************************************************/ ******************************************************************************/
#ifndef _I40E_VIRTCHNL_H_ #ifndef _VIRTCHNL_H_
#define _I40E_VIRTCHNL_H_ #define _VIRTCHNL_H_
#include "i40e_type.h"
/* Description: /* Description:
* This header file describes the VF-PF communication protocol used * This header file describes the VF-PF communication protocol used
* by the various i40e drivers. * by the drivers for all devices starting from our 40G product line
* *
* Admin queue buffer usage: * Admin queue buffer usage:
* desc->opcode is always i40e_aqc_opc_send_msg_to_pf * desc->opcode is always aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally. * flags, retval, datalen, and data addr are all used normally.
* Firmware copies the cookie fields when sending messages between the PF and * The Firmware copies the cookie fields when sending messages between the
* VF, but uses all other fields internally. Due to this limitation, we * PF and VF, but uses all other fields internally. Due to this limitation,
* must send all messages as "indirect", i.e. using an external buffer. * we must send all messages as "indirect", i.e. using an external buffer.
* *
* All the vsi indexes are relative to the VF. Each VF can have maximum of * All the VSI indexes are relative to the VF. Each VF can have maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can * three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs. * have a maximum of sixteen queues for all of its VSIs.
* *
* The PF is required to return a status code in v_retval for all messages * The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value is of * except RESET_VF, which does not require any response. The return value
* i40e_status_code type, defined in the i40e_type.h. * is of status_code type, defined in the shared type.h.
* *
* In general, VF driver initialization should roughly follow the order of these * In general, VF driver initialization should roughly follow the order of
* opcodes. The VF driver must first validate the API version of the PF driver, * these opcodes. The VF driver must first validate the API version of the
* then request a reset, then get resources, then configure queues and * PF driver, then request a reset, then get resources, then configure
* interrupts. After these operations are complete, the VF driver may start * queues and interrupts. After these operations are complete, the VF
* its queues, optionally add MAC and VLAN filters, and process traffic. * driver may start its queues, optionally add MAC and VLAN filters, and
* process traffic.
*/
/* START GENERIC DEFINES
* Need to ensure the following enums and defines hold the same meaning and
* value in current and future projects
*/ */
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
};
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
};
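/* A minimal sketch, not part of this interface: each speed above is a
 * single-bit value, so a VF that wants a plain rate for logging can map
 * them with a switch. The function name is illustrative only.
 */
static inline u32 virtchnl_example_speed_mbps(enum virtchnl_link_speed speed)
{
	switch (speed) {
	case VIRTCHNL_LINK_SPEED_100MB:	return 100;
	case VIRTCHNL_LINK_SPEED_1GB:	return 1000;
	case VIRTCHNL_LINK_SPEED_10GB:	return 10000;
	case VIRTCHNL_LINK_SPEED_20GB:	return 20000;
	case VIRTCHNL_LINK_SPEED_25GB:	return 25000;
	case VIRTCHNL_LINK_SPEED_40GB:	return 40000;
	default:			return 0; /* VIRTCHNL_LINK_SPEED_UNKNOWN */
	}
}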
/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field /* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure. * of the virtchnl_msg structure.
*/ */
enum i40e_virtchnl_ops { enum virtchnl_ops {
/* The PF sends status change events to VFs using /* The PF sends status change events to VFs using
* the I40E_VIRTCHNL_OP_EVENT opcode. * the VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops. * VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of capabilities
* exchange and are not considered part of base mode feature set.
*/ */
I40E_VIRTCHNL_OP_UNKNOWN = 0, VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
I40E_VIRTCHNL_OP_RESET_VF = 2, VIRTCHNL_OP_RESET_VF = 2,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, VIRTCHNL_OP_GET_VF_RESOURCES = 3,
I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, VIRTCHNL_OP_ENABLE_QUEUES = 8,
I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, VIRTCHNL_OP_DISABLE_QUEUES = 9,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, VIRTCHNL_OP_ADD_ETH_ADDR = 10,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, VIRTCHNL_OP_DEL_ETH_ADDR = 11,
I40E_VIRTCHNL_OP_ADD_VLAN = 12, VIRTCHNL_OP_ADD_VLAN = 12,
I40E_VIRTCHNL_OP_DEL_VLAN = 13, VIRTCHNL_OP_DEL_VLAN = 13,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15, VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_RSVD = 16, VIRTCHNL_OP_RSVD = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_IWARP = 20, VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, VIRTCHNL_OP_SET_RSS_HENA = 26,
}; };
/* This macro is used to generate a compilation error if a structure
* is not exactly the correct length. It gives a divide by zero error if the
* structure is not of the correct size, otherwise it creates an enum that is
* never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
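/* For example, VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) below expands to
 *
 *	enum virtchnl_static_assert_enum_virtchnl_msg
 *		{ virtchnl_static_assert_virtchnl_msg = 20 / 1 };
 *
 * when sizeof(struct virtchnl_msg) really is 20 bytes; if the size ever
 * drifts, the divisor becomes 0 and the build fails on a constant
 * division by zero.
 */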
/* Virtual channel message descriptor. This overlays the admin queue /* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers. * descriptor. All other data is passed in external buffers.
*/ */
struct i40e_virtchnl_msg { struct virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
i40e_status v_retval; /* ditto for desc->retval */ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */ u32 vfid; /* used by PF when sending to VF */
}; };
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
/* Message descriptions and data structures.*/ /* Message descriptions and data structures.*/
/* I40E_VIRTCHNL_OP_VERSION /* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number * VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code. * in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1. * Reply from PF has its major/minor versions also in param0 and param1.
...@@ -116,16 +170,21 @@ struct i40e_virtchnl_msg { ...@@ -116,16 +170,21 @@ struct i40e_virtchnl_msg {
* changes in the API. The PF must always respond to this message without * changes in the API. The PF must always respond to this message without
* error regardless of version mismatch. * error regardless of version mismatch.
*/ */
#define I40E_VIRTCHNL_VERSION_MAJOR 1 #define VIRTCHNL_VERSION_MAJOR 1
#define I40E_VIRTCHNL_VERSION_MINOR 1 #define VIRTCHNL_VERSION_MINOR 1
#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct i40e_virtchnl_version_info { struct virtchnl_version_info {
u32 major; u32 major;
u32 minor; u32 minor;
}; };
/* I40E_VIRTCHNL_OP_RESET_VF VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
/* VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters * VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized * until reset completion is indicated. The admin queue must be reinitialized
...@@ -137,39 +196,56 @@ struct i40e_virtchnl_version_info { ...@@ -137,39 +196,56 @@ struct i40e_virtchnl_version_info {
* are cleared. * are cleared.
*/ */
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
* vsi_type should always be 6 for backward compatibility. Add other fields
* as needed.
*/
enum virtchnl_vsi_type {
VIRTCHNL_VSI_TYPE_INVALID = 0,
VIRTCHNL_VSI_SRIOV = 6,
};
/* VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters * Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing * PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more * virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures. * virtchnl_vsi_resource structures.
*/ */
struct i40e_virtchnl_vsi_resource { struct virtchnl_vsi_resource {
u16 vsi_id; u16 vsi_id;
u16 num_queue_pairs; u16 num_queue_pairs;
enum i40e_vsi_type vsi_type; enum virtchnl_vsi_type vsi_type;
u16 qset_handle; u16 qset_handle;
u8 default_mac_addr[ETH_ALEN]; u8 default_mac_addr[ETH_ALEN];
}; };
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 /* VF offload flags
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 */
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 #define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \ #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \ #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
struct i40e_virtchnl_vf_resource { #define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct virtchnl_vf_resource {
u16 num_vsis; u16 num_vsis;
u16 num_queue_pairs; u16 num_queue_pairs;
u16 max_vectors; u16 max_vectors;
...@@ -179,71 +255,84 @@ struct i40e_virtchnl_vf_resource { ...@@ -179,71 +255,84 @@ struct i40e_virtchnl_vf_resource {
u32 rss_key_size; u32 rss_key_size;
u32 rss_lut_size; u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1]; struct virtchnl_vsi_resource vsi_res[1];
}; };
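/* A minimal sketch, not part of this interface: the reply carries num_vsis
 * trailing VSI entries, and a VF typically picks out its VIRTCHNL_VSI_SRIOV
 * VSI from them (i40evf does the equivalent when it processes its config).
 */
static inline struct virtchnl_vsi_resource *
virtchnl_example_find_sriov_vsi(struct virtchnl_vf_resource *res)
{
	u16 i;

	for (i = 0; i < res->num_vsis; i++)
		if (res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			return &res->vsi_res[i];
	return NULL;
}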
/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue. * VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of i40e_virtchnl_txq_info. * External data buffer contains one instance of virtchnl_txq_info.
* PF configures requested queue and returns a status code. * PF configures requested queue and returns a status code.
*/ */
/* Tx queue config info */ /* Tx queue config info */
struct i40e_virtchnl_txq_info { struct virtchnl_txq_info {
u16 vsi_id; u16 vsi_id;
u16 queue_id; u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */ u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled; u16 headwb_enabled; /* deprecated with AVF 1.0 */
u64 dma_ring_addr; u64 dma_ring_addr;
u64 dma_headwb_addr; u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
}; };
/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue. * VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of i40e_virtchnl_rxq_info. * External data buffer contains one instance of virtchnl_rxq_info.
* PF configures requested queue and returns a status code. * PF configures requested queue and returns a status code.
*/ */
/* Rx queue config info */ /* Rx queue config info */
struct i40e_virtchnl_rxq_info { struct virtchnl_rxq_info {
u16 vsi_id; u16 vsi_id;
u16 queue_id; u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */ u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size; u16 hdr_size;
u16 splithdr_enabled; u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size; u32 databuffer_size;
u32 max_pkt_size; u32 max_pkt_size;
u32 pad1;
u64 dma_ring_addr; u64 dma_ring_addr;
enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
u32 pad2;
}; };
/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for all active TX and RX queues * VF sends this message to set parameters for all active TX and RX queues
* associated with the specified VSI. * associated with the specified VSI.
* PF configures queues and returns status. * PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues * If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured. * associated with the VSI, an error is returned and no queues are configured.
*/ */
struct i40e_virtchnl_queue_pair_info { struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */ /* NOTE: vsi_id and queue_id should be identical for both queues. */
struct i40e_virtchnl_txq_info txq; struct virtchnl_txq_info txq;
struct i40e_virtchnl_rxq_info rxq; struct virtchnl_rxq_info rxq;
}; };
struct i40e_virtchnl_vsi_queue_config_info { VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
struct virtchnl_vsi_queue_config_info {
u16 vsi_id; u16 vsi_id;
u16 num_queue_pairs; u16 num_queue_pairs;
struct i40e_virtchnl_queue_pair_info qpair[1]; u32 pad;
struct virtchnl_queue_pair_info qpair[1];
}; };
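/* A minimal sketch, not part of this interface: the message is variable
 * length, so a VF sizes the buffer as the fixed header plus one
 * virtchnl_queue_pair_info per pair, which is also the length
 * virtchnl_vc_validate_vf_msg() below expects for this opcode. kzalloc()
 * assumes the usual kernel context.
 */
static inline struct virtchnl_vsi_queue_config_info *
virtchnl_example_alloc_queue_cfg(u16 vsi_id, u16 pairs, u16 *lenp)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	u16 len = sizeof(*vqci) +
		  pairs * sizeof(struct virtchnl_queue_pair_info);

	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return NULL;
	vqci->vsi_id = vsi_id;
	vqci->num_queue_pairs = pairs;
	*lenp = len;
	return vqci;	/* caller fills qpair[0..pairs-1] and sends the buffer */
}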
/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues. * VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues * The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector. * are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0. * The "other" causes are always mapped to vector 0.
* PF configures interrupt mapping and returns status. * PF configures interrupt mapping and returns status.
*/ */
struct i40e_virtchnl_vector_map { struct virtchnl_vector_map {
u16 vsi_id; u16 vsi_id;
u16 vector_id; u16 vector_id;
u16 rxq_map; u16 rxq_map;
...@@ -252,142 +341,162 @@ struct i40e_virtchnl_vector_map { ...@@ -252,142 +341,162 @@ struct i40e_virtchnl_vector_map {
u16 txitr_idx; u16 txitr_idx;
}; };
struct i40e_virtchnl_irq_map_info { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors; u16 num_vectors;
struct i40e_virtchnl_vector_map vecmap[1]; struct virtchnl_vector_map vecmap[1];
}; };
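/* A minimal sketch, not part of this interface: rxq_map and txq_map are
 * per-vector bitmaps of queue ids, so tying rx queue q to a vector is a
 * single bit set (i40evf_map_queues() earlier in this patch builds the same
 * masks from its q_vector ring masks).
 */
static inline void virtchnl_example_map_rxq(struct virtchnl_vector_map *vecmap,
					    u16 vsi_id, u16 vector_id, u16 q)
{
	vecmap->vsi_id = vsi_id;
	vecmap->vector_id = vector_id;
	vecmap->rxq_map |= BIT(q);	/* rx queue q now fires this vector */
}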
/* I40E_VIRTCHNL_OP_ENABLE_QUEUES VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* I40E_VIRTCHNL_OP_DISABLE_QUEUES
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these messages to enable or disable TX/RX queue pairs. * VF sends these messages to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon. * The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field * (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.) * u32 to allow for expansion.)
* PF performs requested action and returns status. * PF performs requested action and returns status.
*/ */
struct i40e_virtchnl_queue_select { struct virtchnl_queue_select {
u16 vsi_id; u16 vsi_id;
u16 pad; u16 pad;
u32 rx_queues; u32 rx_queues;
u32 tx_queues; u32 tx_queues;
}; };
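/* A minimal sketch, not part of this interface: the rx/tx queue fields are
 * bitmaps, so enabling the first n queue pairs of a VSI is just a mask of
 * the low n bits (the comment above caps a VF at 16 queues today).
 */
static inline void virtchnl_example_select_queues(struct virtchnl_queue_select *vqs,
						  u16 vsi_id, u32 n)
{
	vqs->vsi_id = vsi_id;
	vqs->rx_queues = BIT(n) - 1;	/* queues 0 .. n-1 */
	vqs->tx_queues = vqs->rx_queues;
}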
/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
/* VIRTCHNL_OP_ADD_ETH_ADDR
* VF sends this message in order to add one or more unicast or multicast * VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI. * address filters for the specified VSI.
* PF adds the filters and returns status. * PF adds the filters and returns status.
*/ */
/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS /* VIRTCHNL_OP_DEL_ETH_ADDR
* VF sends this message in order to remove one or more unicast or multicast * VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI. * filters for the specified VSI.
* PF removes the filters and returns status. * PF removes the filters and returns status.
*/ */
struct i40e_virtchnl_ether_addr { struct virtchnl_ether_addr {
u8 addr[ETH_ALEN]; u8 addr[ETH_ALEN];
u8 pad[2]; u8 pad[2];
}; };
struct i40e_virtchnl_ether_addr_list { VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id; u16 vsi_id;
u16 num_elements; u16 num_elements;
struct i40e_virtchnl_ether_addr list[1]; struct virtchnl_ether_addr list[1];
}; };
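/* A minimal sketch, not part of this interface: the list carries
 * num_elements packed entries after the header. Assuming the buffer was
 * allocated with room for n entries (as i40evf_add_ether_addrs() above
 * does), filling it looks like this; ether_addr_copy() is the usual
 * <linux/etherdevice.h> helper.
 */
static inline void virtchnl_example_fill_macs(struct virtchnl_ether_addr_list *veal,
					      const u8 (*macs)[ETH_ALEN], u16 n)
{
	u16 i;

	for (i = 0; i < n; i++)
		ether_addr_copy(veal->list[i].addr, macs[i]);
	veal->num_elements = n;
}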
/* I40E_VIRTCHNL_OP_ADD_VLAN VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives. * VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status. * PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an * If a port VLAN is configured by the PF, this operation will return an
* error to the VF. * error to the VF.
*/ */
/* I40E_VIRTCHNL_OP_DEL_VLAN /* VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives. * VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status. * PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an * If a port VLAN is configured by the PF, this operation will return an
* error to the VF. * error to the VF.
*/ */
struct i40e_virtchnl_vlan_filter_list { struct virtchnl_vlan_filter_list {
u16 vsi_id; u16 vsi_id;
u16 num_elements; u16 num_elements;
u16 vlan_id[1]; u16 vlan_id[1];
}; };
/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags. * VF sends VSI id and flags.
* PF returns status code in retval. * PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled. * Note: we assume that broadcast accept mode is always enabled.
*/ */
struct i40e_virtchnl_promisc_info { struct virtchnl_promisc_info {
u16 vsi_id; u16 vsi_id;
u16 flags; u16 flags;
}; };
#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002
/* I40E_VIRTCHNL_OP_GET_STATS /* VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses * VF sends this message to request stats for the selected VSI. VF uses
* the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id * the virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF. * field is ignored by the PF.
* *
* PF replies with struct i40e_eth_stats in an external buffer. * PF replies with struct eth_stats in an external buffer.
*/ */
/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY /* VIRTCHNL_OP_CONFIG_RSS_KEY
* I40E_VIRTCHNL_OP_CONFIG_RSS_LUT * VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF * VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the RSS fields in * configuration negotiation. If this is the case, then the RSS fields in
* the VF resource struct are valid. * the VF resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that * Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF. * RSS is effectively disabled until set up by the VF.
*/ */
struct i40e_virtchnl_rss_key { struct virtchnl_rss_key {
u16 vsi_id; u16 vsi_id;
u16 key_len; u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */ u8 key[1]; /* RSS hash key, packed bytes */
}; };
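/* Note that key[1] already accounts for one byte inside the struct, so the
 * message length for a key of key_len bytes is
 *
 *	sizeof(struct virtchnl_rss_key) + key_len - 1
 *
 * which is exactly what i40evf_set_rss_key() above computes and what
 * virtchnl_vc_validate_vf_msg() below checks. The same pattern applies to
 * virtchnl_rss_lut and its lut[1] member.
 */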
struct i40e_virtchnl_rss_lut { VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id; u16 vsi_id;
u16 lut_entries; u16 lut_entries;
u8 lut[1]; /* RSS lookup table*/ u8 lut[1]; /* RSS lookup table*/
}; };
/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
* I40E_VIRTCHNL_OP_SET_RSS_HENA
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS. * VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the * By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the * hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware. * traffic types that are hashed by the hardware.
* Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
*/ */
struct i40e_virtchnl_rss_hena { struct virtchnl_rss_hena {
u64 hena; u64 hena;
}; };
/* I40E_VIRTCHNL_OP_EVENT VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it. * PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other * No direct response is expected from the VF, though it may generate other
* messages in response to this one. * messages in response to this one.
*/ */
enum i40e_virtchnl_event_codes { enum virtchnl_event_codes {
I40E_VIRTCHNL_EVENT_UNKNOWN = 0, VIRTCHNL_EVENT_UNKNOWN = 0,
I40E_VIRTCHNL_EVENT_LINK_CHANGE, VIRTCHNL_EVENT_LINK_CHANGE,
I40E_VIRTCHNL_EVENT_RESET_IMPENDING, VIRTCHNL_EVENT_RESET_IMPENDING,
I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
}; };
#define I40E_PF_EVENT_SEVERITY_INFO 0
#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct i40e_virtchnl_pf_event { #define PF_EVENT_SEVERITY_INFO 0
enum i40e_virtchnl_event_codes event; #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union { union {
struct { struct {
enum i40e_aq_link_speed link_speed; enum virtchnl_link_speed link_speed;
bool link_status; bool link_status;
} link_event; } link_event;
} event_data; } event_data;
...@@ -395,7 +504,9 @@ struct i40e_virtchnl_pf_event { ...@@ -395,7 +504,9 @@ struct i40e_virtchnl_pf_event {
int severity; int severity;
}; };
/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
* VF uses this message to request PF to map IWARP vectors to IWARP queues. * VF uses this message to request PF to map IWARP vectors to IWARP queues.
* The request for this originates from the VF IWARP driver through * The request for this originates from the VF IWARP driver through
* a client interface between VF LAN and VF IWARP driver. * a client interface between VF LAN and VF IWARP driver.
...@@ -407,28 +518,24 @@ struct i40e_virtchnl_pf_event { ...@@ -407,28 +518,24 @@ struct i40e_virtchnl_pf_event {
* PF configures interrupt mapping and returns status. * PF configures interrupt mapping and returns status.
*/ */
/* HW does not define a type value for AEQ; only for RX/TX and CEQ. struct virtchnl_iwarp_qv_info {
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
#define I40E_QUEUE_TYPE_PE_AEQ 0x80
#define I40E_QUEUE_INVALID_IDX 0xFFFF
struct i40e_virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */ u32 v_idx; /* msix_vector */
u16 ceq_idx; u16 ceq_idx;
u16 aeq_idx; u16 aeq_idx;
u8 itr_idx; u8 itr_idx;
}; };
struct i40e_virtchnl_iwarp_qvlist_info { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
struct virtchnl_iwarp_qvlist_info {
u32 num_vectors; u32 num_vectors;
struct i40e_virtchnl_iwarp_qv_info qv_info[1]; struct virtchnl_iwarp_qv_info qv_info[1];
}; };
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
/* VF reset states - these are written into the RSTAT register: /* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF * VFGEN_RSTAT on the VF
* I40E_VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0 * When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1 * When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2 * When the PF detects that the VF has recovered, it writes 2
...@@ -438,11 +545,157 @@ struct i40e_virtchnl_iwarp_qvlist_info { ...@@ -438,11 +545,157 @@ struct i40e_virtchnl_iwarp_qvlist_info {
* is in a reset state, it will return DEADBEEF, which, when masked * is in a reset state, it will return DEADBEEF, which, when masked
* will result in 3. * will result in 3.
*/ */
enum i40e_vfr_states { enum virtchnl_vfr_states {
I40E_VFR_INPROGRESS = 0, VIRTCHNL_VFR_INPROGRESS = 0,
I40E_VFR_COMPLETED, VIRTCHNL_VFR_COMPLETED,
I40E_VFR_VFACTIVE, VIRTCHNL_VFR_VFACTIVE,
I40E_VFR_UNKNOWN,
}; };
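/* A minimal sketch, not part of this interface: after requesting a reset a
 * VF polls its VFGEN_RSTAT register and masks off the low two state bits
 * (register and mask names vary per driver; i40evf uses I40E_VFGEN_RSTAT
 * and I40E_VFGEN_RSTAT_VFR_STATE_MASK).
 */
static inline bool virtchnl_example_reset_done(u32 vfgen_rstat)
{
	u32 state = vfgen_rstat & 0x3;	/* 0xDEADBEEF masks to 3: still in reset */

	return state == VIRTCHNL_VFR_COMPLETED || state == VIRTCHNL_VFR_VFACTIVE;
}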
#endif /* _I40E_VIRTCHNL_H_ */ /**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
* @v_opcode: Opcode for the message
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* validate msg format against struct for each opcode
*/
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
int valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct virtchnl_version_info);
break;
case VIRTCHNL_OP_RESET_VF:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(ver))
valid_len = sizeof(u32);
break;
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct virtchnl_txq_info);
break;
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
sizeof(struct
virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
sizeof(struct virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
sizeof(struct virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct virtchnl_promisc_info);
break;
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_IWARP:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
*/
if (msglen)
valid_len = msglen;
else
err_msg_format = true;
break;
case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
if (msglen >= valid_len) {
struct virtchnl_iwarp_qvlist_info *qv =
(struct virtchnl_iwarp_qvlist_info *)msg;
if (qv->num_vectors == 0) {
err_msg_format = true;
break;
}
valid_len += ((qv->num_vectors - 1) *
sizeof(struct virtchnl_iwarp_qv_info));
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
valid_len = sizeof(struct virtchnl_rss_key);
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
valid_len += vrk->key_len - 1;
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
valid_len = sizeof(struct virtchnl_rss_lut);
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
valid_len += vrl->lut_entries - 1;
}
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return VIRTCHNL_ERR_PARAM;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format))
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
}
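/* A PF driver would typically run each incoming VF message through the
 * validator before acting on it, along the lines of (vf_ver and
 * send_err_to_vf() are placeholders for the PF's own per-VF version record
 * and error-reply helper):
 *
 *	err = virtchnl_vc_validate_vf_msg(&vf_ver, v_opcode, msg, msglen);
 *	if (err)
 *		return send_err_to_vf(v_opcode, err);
 */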
#endif /* _VIRTCHNL_H_ */