Commit aa8a8b05 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2016-05-04

This series contains updates to ixgbe, ixgbevf and traffic class helpers.

Sridhar adds helper functions to the tc_mirred header to access tcf_mirred
information and then implements them for ixgbe to enable redirection to
an SRIOV VF or an offloaded MACVLAN device queue via the tc 'mirred' action.
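
As a rough illustration of how a driver can consume these helpers (the
surrounding offload plumbing and the ixgbe_redirect_to_ifindex() helper
below are assumed names, not part of this series), an action-parsing path
might look like:

static int example_parse_action(struct ixgbe_adapter *adapter,
                                const struct tc_action *a)
{
    /* Only egress redirect actions are offloadable here. */
    if (!is_tcf_mirred_redirect(a))
        return -EINVAL;

    /* Map the redirect target's ifindex to a VF or offloaded
     * MACVLAN queue (hypothetical driver helper). */
    return ixgbe_redirect_to_ifindex(adapter, tcf_mirred_ifindex(a));
}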

Amritha adds support for setting filters that match on multiple header
fields (L3, L4).

KY Srinivasan from Microsoft adds Hyper-V support to ixgbevf.

Emil adds 82599 sub-device IDs that were missing from the list of parts
that support WoL, then simplifies the logic we use to determine WoL
support by reading the EEPROM bits for MACs X540 and newer.
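
With the helper now returning bool, a caller such as an ethtool get_wol
handler reduces to a plain test; this is a minimal sketch, not the
driver's exact code, and the WAKE_* set shown is illustrative:

static void example_get_wol(struct ixgbe_adapter *adapter,
                            struct ethtool_wolinfo *wol)
{
    wol->supported = 0;
    wol->wolopts = 0;

    if (ixgbe_wol_supported(adapter, adapter->pdev->device,
                            adapter->pdev->subsystem_device))
        wol->supported = WAKE_UCAST | WAKE_MCAST |
                         WAKE_BCAST | WAKE_MAGIC;
}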

Preethi cleaned up duplicate and unused device IDs and fixed our ethtool
stat reporting, which was ignoring the upper 32 bits of the stats
registers; 64 bit stat values are now reported as two 32 bit words.
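
The reporting pattern is simply to emit each 64 bit counter as two
consecutive 32 bit words; store_stat64() below is an illustrative
helper, while the actual change open-codes the casts in ixgbe_get_regs():

static void store_stat64(u32 *buf, unsigned int idx, u64 val)
{
    buf[idx] = (u32)val;                /* lower 32 bits */
    buf[idx + 1] = (u32)(val >> 32);    /* upper 32 bits */
}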

Babu Moger from Oracle addresses VF performance issues on SPARC.

Alex Duyck cleans up some of the Hyper-V implementation from KY so that
we can simply use function pointers instead of having to identify whether
a given VF is running on a Linux or Windows PF.
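
The net effect is ordinary ops-table dispatch: the PCI device ID picks a
board entry whose mac_ops already point at either the mailbox-based or
the Hyper-V callbacks, so hot paths never branch on the hypervisor. A
condensed sketch of the probe-time wiring (abbreviated, not the literal
driver code):

const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];

hw->mac.type = ii->mac;
hw->mac.ops = *ii->mac_ops;    /* ixgbevf_mac_ops or ixgbevf_hv_mac_ops */

/* Later callers are identical on both paths: */
hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);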

Usha makes sure that DCB and FCoE are disabled for X550EM_x/a MACs and
cleans up the DCB initialization in the process.
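
A sketch of the capability-flag approach (the new IXGBE_FLAG_DCB_CAPABLE
bit is what the DCB init now keys off; the placement and the exact
MAC-type cases below are illustrative):

/* Assume DCB is possible, then strip the capability on MACs that do not
 * support it; the DCB setup code later checks only this flag. */
adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;

switch (hw->mac.type) {
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
    adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
    break;
default:
    break;
}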

Tony cleans up the API for ixgbevf_update_xcast_mode() so we do not
have to pass in the netdev parameter, since it was never used in the
function.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3e66bab3 8b44a8a0
@@ -644,6 +644,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
     u32 flags2;
 #define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
@@ -792,7 +793,7 @@ struct ixgbe_adapter {
     unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
 #define IXGBE_MAX_LINK_HANDLE 10
-    struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+    struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
     unsigned long tables;
 /* maximum number of RETA entries among all devices supported by ixgbe
@@ -895,7 +896,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
 void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                         u16 subdevice_id);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
...
@@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
     /* Flow Control */
     regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
-    regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
-    regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
-    regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
-    regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+    for (i = 0; i < 4; i++)
+        regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
     for (i = 0; i < 8; i++) {
         switch (hw->mac.type) {
         case ixgbe_mac_82598EB:
@@ -720,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
     regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
     regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
     regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
-    regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
-    regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+    regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+    regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+    regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+    regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
     for (i = 0; i < 8; i++)
         regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
     regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -731,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
     regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
     regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
     regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
-    regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+    regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+    regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
     regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
     regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
     regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
@@ -803,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev,
         regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
     regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
     regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
-    regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
-    regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
-    regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
-    regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+    for (i = 0; i < 4; i++)
+        regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
     regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
-    regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
-    regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
-    regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
-    regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+    for (i = 0; i < 4; i++)
+        regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
     for (i = 0; i < 8; i++)
         regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
     regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
...
@@ -38,6 +38,12 @@ struct ixgbe_mat_field {
     unsigned int type;
 };
+struct ixgbe_jump_table {
+    struct ixgbe_mat_field *mat;
+    struct ixgbe_fdir_filter *input;
+    union ixgbe_atr_input *mask;
+};
 static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
                                      union ixgbe_atr_input *mask,
                                      u32 val, u32 m)
...
@@ -59,8 +59,12 @@
 #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
 #define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
 #define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
 #define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -89,10 +93,6 @@
 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
 /* VF Device IDs */
-#define IXGBE_DEV_ID_X550_VF_HV 0x1564
-#define IXGBE_DEV_ID_X550_VF 0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
 #define IXGBE_DEV_ID_82599_VF 0x10ED
 #define IXGBE_DEV_ID_X540_VF 0x1515
 #define IXGBE_DEV_ID_X550_VF 0x1565
@@ -548,6 +548,7 @@ struct ixgbe_thermal_sensor_data {
 /* DCB registers */
 #define MAX_TRAFFIC_CLASS 8
 #define X540_TRAFFIC_CLASS 4
+#define DEF_TRAFFIC_CLASS 1
 #define IXGBE_RMCS 0x03D00
 #define IXGBE_DPMCS 0x07F40
 #define IXGBE_PDPMCS 0x0CD00
@@ -1060,15 +1061,9 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
 #define IXGBE_TDPROBE 0x07F20
 #define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_RFVAL 0x050A4
 #define IXGBE_MDFTC1 0x042B8
...
@@ -33,6 +33,11 @@
 #define IXGBE_DEV_ID_X550_VF 0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
 #define IXGBE_VF_IRQ_CLEAR_MASK 7
 #define IXGBE_VF_MAX_TX_QUEUES 8
 #define IXGBE_VF_MAX_RX_QUEUES 8
...
@@ -450,9 +450,13 @@ enum ixbgevf_state_t {
 enum ixgbevf_boards {
     board_82599_vf,
+    board_82599_vf_hv,
     board_X540_vf,
+    board_X540_vf_hv,
     board_X550_vf,
+    board_X550_vf_hv,
     board_X550EM_x_vf,
+    board_X550EM_x_vf_hv,
 };
 enum ixgbevf_xcast_modes {
@@ -467,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info;
 extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
 extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
 /* needed by ethtool.c */
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
...
@@ -63,9 +63,13 @@ static char ixgbevf_copyright[] =
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
     [board_82599_vf] = &ixgbevf_82599_vf_info,
+    [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
     [board_X540_vf] = &ixgbevf_X540_vf_info,
+    [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
     [board_X550_vf] = &ixgbevf_X550_vf_info,
+    [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
     [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+    [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
 };
 /* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
  */
 static const struct pci_device_id ixgbevf_pci_tbl[] = {
     {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
     {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
     {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
     {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
     /* required last entry */
     {0, }
 };
@@ -1752,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
     IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
                     ring->count * sizeof(union ixgbe_adv_rx_desc));
+#ifndef CONFIG_SPARC
     /* enable relaxed ordering */
     IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
                     IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+    IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+                    IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+                    IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
     /* reset head and tail pointers */
     IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1795,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
     ixgbevf_setup_vfmrqc(adapter);
     /* notify the PF of our intent to use this size of frame */
-    ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+    hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
     /* Setup the HW Rx Head and Tail Descriptor Pointers and
      * the Base and Length of the Rx Descriptor Ring
@@ -1908,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
     spin_lock_bh(&adapter->mbx_lock);
-    hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+    hw->mac.ops.update_xcast_mode(hw, xcast_mode);
     /* reprogram multicast list */
     hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -3740,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
     netdev->mtu = new_mtu;
     /* notify the PF of our intent to use this size of frame */
-    ixgbevf_rlpml_set_vf(hw, max_frame);
+    hw->mac.ops.set_rlpml(hw, max_frame);
     return 0;
 }
...
@@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
     .check_for_rst = ixgbevf_check_for_rst_vf,
 };
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication is not through the
+ * hardware mailbox; this communication is through
+ * a software mediated path.
+ * Most mail box operations are noop while running on
+ * Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+    .init_params = ixgbevf_init_mbx_params_vf,
+    .check_for_rst = ixgbevf_check_for_rst_vf,
+};
@@ -27,6 +27,12 @@
 #include "vf.h"
 #include "ixgbevf.h"
+/* On Hyper-V, to reset, we need to read from this offset
+ * from the PCI config space. This is the mechanism used on
+ * Hyper-V to support PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET 0x201
 /**
  * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  * @hw: pointer to hardware structure
@@ -125,6 +131,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
     return 0;
 }
+/**
+ * Hyper-V variant; the VF/PF communication is through the PCI
+ * config space.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+    struct ixgbevf_adapter *adapter = hw->back;
+    int i;
+    for (i = 0; i < 6; i++)
+        pci_read_config_byte(adapter->pdev,
+                             (i + IXGBE_HV_RESET_OFFSET),
+                             &hw->mac.perm_addr[i]);
+    return 0;
+#else
+    pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+    return -EOPNOTSUPP;
+#endif
+}
 /**
  * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
  * @hw: pointer to hardware structure
@@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
     return ret_val;
 }
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+    return -EOPNOTSUPP;
+}
 /**
  * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
  * @adapter: pointer to the port handle
@@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
     return ret_val;
 }
+/**
+ * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address we will
+ * permit that.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+                                 u32 vmdq)
+{
+    if (ether_addr_equal(addr, hw->mac.perm_addr))
+        return 0;
+    return -EOPNOTSUPP;
+}
 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
                                        u32 *msg, u16 size)
 {
@@ -472,16 +524,23 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
     return 0;
 }
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+                                             struct net_device *netdev)
+{
+    return -EOPNOTSUPP;
+}
 /**
  * ixgbevf_update_xcast_mode - Update Multicast mode
  * @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
  * @xcast_mode: new multicast mode
  *
  * Updates the Multicast Mode of VF.
  **/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
-                                     struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 {
     struct ixgbe_mbx_info *mbx = &hw->mbx;
     u32 msgbuf[2];
@@ -512,6 +571,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
     return 0;
 }
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+    return -EOPNOTSUPP;
+}
 /**
  * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  * @hw: pointer to the HW structure
@@ -550,6 +617,15 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
     return err;
 }
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                                  bool vlan_on)
+{
+    return -EOPNOTSUPP;
+}
 /**
  * ixgbevf_setup_mac_link_vf - Setup MAC link settings
  * @hw: pointer to hardware structure
@@ -656,11 +732,72 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 }
 /**
- * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+                                        ixgbe_link_speed *speed,
+                                        bool *link_up,
+                                        bool autoneg_wait_to_complete)
+{
+    struct ixgbe_mbx_info *mbx = &hw->mbx;
+    struct ixgbe_mac_info *mac = &hw->mac;
+    u32 links_reg;
+    /* If we were hit with a reset drop the link */
+    if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+        mac->get_link_status = true;
+    if (!mac->get_link_status)
+        goto out;
+    /* if link status is down no point in checking to see if pf is up */
+    links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+    if (!(links_reg & IXGBE_LINKS_UP))
+        goto out;
+    /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+     * before the link status is correct
+     */
+    if (mac->type == ixgbe_mac_82599_vf) {
+        int i;
+        for (i = 0; i < 5; i++) {
+            udelay(100);
+            links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+            if (!(links_reg & IXGBE_LINKS_UP))
+                goto out;
+        }
+    }
+    switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+    case IXGBE_LINKS_SPEED_10G_82599:
+        *speed = IXGBE_LINK_SPEED_10GB_FULL;
+        break;
+    case IXGBE_LINKS_SPEED_1G_82599:
+        *speed = IXGBE_LINK_SPEED_1GB_FULL;
+        break;
+    case IXGBE_LINKS_SPEED_100_82599:
+        *speed = IXGBE_LINK_SPEED_100_FULL;
+        break;
+    }
+    /* if we passed all the tests above then the link is up and we no
+     * longer need to check for link
+     */
+    mac->get_link_status = false;
+out:
+    *link_up = !mac->get_link_status;
+    return 0;
+}
+/**
+ * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
  * @hw: pointer to the HW structure
  * @max_size: value to assign to max frame size
  **/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 {
     u32 msgbuf[2];
@@ -669,6 +806,25 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
     ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
 }
+/**
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+    u32 reg;
+    /* If we are on Hyper-V, we implement this functionality
+     * differently.
+     */
+    reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+    /* CRC == 4 */
+    reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+    IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+}
 /**
  * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
  * @hw: pointer to the HW structure
@@ -703,6 +859,21 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
     return err;
 }
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+    /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+    if (api != ixgbe_mbox_api_10)
+        return IXGBE_ERR_INVALID_ARGUMENT;
+    return 0;
+}
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                        unsigned int *default_tc)
 {
@@ -775,6 +946,24 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
     .update_xcast_mode = ixgbevf_update_xcast_mode,
     .set_uc_addr = ixgbevf_set_uc_addr_vf,
     .set_vfta = ixgbevf_set_vfta_vf,
+    .set_rlpml = ixgbevf_set_rlpml_vf,
+};
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+    .init_hw = ixgbevf_init_hw_vf,
+    .reset_hw = ixgbevf_hv_reset_hw_vf,
+    .start_hw = ixgbevf_start_hw_vf,
+    .get_mac_addr = ixgbevf_get_mac_addr_vf,
+    .stop_adapter = ixgbevf_stop_hw_vf,
+    .setup_link = ixgbevf_setup_mac_link_vf,
+    .check_link = ixgbevf_hv_check_mac_link_vf,
+    .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
+    .set_rar = ixgbevf_hv_set_rar_vf,
+    .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
+    .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+    .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
+    .set_vfta = ixgbevf_hv_set_vfta_vf,
+    .set_rlpml = ixgbevf_hv_set_rlpml_vf,
 };
 const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -782,17 +971,37 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = {
     .mac_ops = &ixgbevf_mac_ops,
 };
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+    .mac = ixgbe_mac_82599_vf,
+    .mac_ops = &ixgbevf_hv_mac_ops,
+};
 const struct ixgbevf_info ixgbevf_X540_vf_info = {
     .mac = ixgbe_mac_X540_vf,
     .mac_ops = &ixgbevf_mac_ops,
 };
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+    .mac = ixgbe_mac_X540_vf,
+    .mac_ops = &ixgbevf_hv_mac_ops,
+};
 const struct ixgbevf_info ixgbevf_X550_vf_info = {
     .mac = ixgbe_mac_X550_vf,
     .mac_ops = &ixgbevf_mac_ops,
 };
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+    .mac = ixgbe_mac_X550_vf,
+    .mac_ops = &ixgbevf_hv_mac_ops,
+};
 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
     .mac = ixgbe_mac_X550EM_x_vf,
     .mac_ops = &ixgbevf_mac_ops,
 };
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+    .mac = ixgbe_mac_X550EM_x_vf,
+    .mac_ops = &ixgbevf_hv_mac_ops,
+};
@@ -64,11 +64,12 @@ struct ixgbe_mac_operations {
     s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
     s32 (*init_rx_addrs)(struct ixgbe_hw *);
     s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
-    s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+    s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
     s32 (*enable_mc)(struct ixgbe_hw *);
     s32 (*disable_mc)(struct ixgbe_hw *);
     s32 (*clear_vfta)(struct ixgbe_hw *);
     s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+    void (*set_rlpml)(struct ixgbe_hw *, u16);
 };
 enum ixgbe_mac_type {
@@ -208,7 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
 #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                        unsigned int *default_tc);
 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
...
@@ -2,6 +2,7 @@
 #define __NET_TC_MIR_H
 #include <net/act_api.h>
+#include <linux/tc_act/tc_mirred.h>
 struct tcf_mirred {
     struct tcf_common common;
@@ -14,4 +15,18 @@ struct tcf_mirred {
 #define to_mirred(a) \
     container_of(a->priv, struct tcf_mirred, common)
+static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+    if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+        return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
+#endif
+    return false;
+}
+static inline int tcf_mirred_ifindex(const struct tc_action *a)
+{
+    return to_mirred(a)->tcfm_ifindex;
+}
 #endif /* __NET_TC_MIR_H */