Commit e3511997 authored by Paolo Abeni

Merge branch 'add-flow-director-for-txgbe'

Jiawen Wu says:

====================
add flow director for txgbe

Add flow director support for Wangxun 10Gb NICs.

v2 -> v3: https://lore.kernel.org/all/20240605020852.24144-1-jiawenwu@trustnetic.com/
- Wrap the code at 80 chars where possible. (Jakub Kicinski)
- Add missing function descriptions for kernel-doc. (Jakub Kicinski)
- Correct return code. (Simon Horman)
- Remove redundant size check. (Hariprasad Kelam)

v1 -> v2: https://lore.kernel.org/all/20240529093821.27108-1-jiawenwu@trustnetic.com/
- Fix build warnings reported by kernel test robot.
====================

Link: https://lore.kernel.org/r/20240618101609.3580-1-jiawenwu@trustnetic.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

parents 7e8fcb81 34744a77
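
For readers new to the feature: flow director runs in two modes, which the series models with the WX_FLAG_FDIR_HASH and WX_FLAG_FDIR_PERFECT flags below. In hash (ATR) mode the transmit path samples outgoing packets and programs signature filters so that receive traffic for a flow lands on the queue the flow transmits from; in perfect-filter mode the user pins flows explicitly through the ethtool n-tuple interface, e.g. `ethtool -K eth0 ntuple on` followed by `ethtool -N eth0 flow-type tcp4 dst-port 80 action 2` (interface name and values are illustrative, not taken from the series).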
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -43,6 +43,11 @@ static const struct wx_stats wx_gstrings_stats[] = {
 	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
 };
 
+static const struct wx_stats wx_gstrings_fdir_stats[] = {
+	WX_STAT("fdir_match", stats.fdirmatch),
+	WX_STAT("fdir_miss", stats.fdirmiss),
+};
+
 /* drivers allocates num_tx_queues and num_rx_queues symmetrically so
  * we set the num_rx_queues to evaluate to num_tx_queues. This is
  * used because we do not have a good way to get the max number of
@@ -55,13 +60,17 @@ static const struct wx_stats wx_gstrings_stats[] = {
 	 (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
 	 (sizeof(struct wx_queue_stats) / sizeof(u64)))
 #define WX_GLOBAL_STATS_LEN  ARRAY_SIZE(wx_gstrings_stats)
+#define WX_FDIR_STATS_LEN  ARRAY_SIZE(wx_gstrings_fdir_stats)
 #define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
 
 int wx_get_sset_count(struct net_device *netdev, int sset)
 {
+	struct wx *wx = netdev_priv(netdev);
+
 	switch (sset) {
 	case ETH_SS_STATS:
-		return WX_STATS_LEN;
+		return (wx->mac.type == wx_mac_sp) ?
+			WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -70,6 +79,7 @@ EXPORT_SYMBOL(wx_get_sset_count);
 void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
+	struct wx *wx = netdev_priv(netdev);
 	u8 *p = data;
 	int i;
 
@@ -77,6 +87,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 	case ETH_SS_STATS:
 		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
 			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
+		if (wx->mac.type == wx_mac_sp) {
+			for (i = 0; i < WX_FDIR_STATS_LEN; i++)
+				ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
+		}
 		for (i = 0; i < netdev->num_tx_queues; i++) {
 			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
 			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -96,7 +110,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
 	struct wx *wx = netdev_priv(netdev);
 	struct wx_ring *ring;
 	unsigned int start;
-	int i, j;
+	int i, j, k;
 	char *p;
 
 	wx_update_stats(wx);
@@ -107,6 +121,13 @@ void wx_get_ethtool_stats(struct net_device *netdev,
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 
+	if (wx->mac.type == wx_mac_sp) {
+		for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
+			p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
+			data[i++] = *(u64 *)p;
+		}
+	}
+
 	for (j = 0; j < netdev->num_tx_queues; j++) {
 		ring = wx->tx_ring[j];
 		if (!ring) {
@@ -172,17 +193,21 @@ EXPORT_SYMBOL(wx_get_pause_stats);
 void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
 {
+	unsigned int stats_len = WX_STATS_LEN;
 	struct wx *wx = netdev_priv(netdev);
 
+	if (wx->mac.type == wx_mac_sp)
+		stats_len += WX_FDIR_STATS_LEN;
+
 	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
 	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
 	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
 	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
-		info->n_stats = WX_STATS_LEN -
+		info->n_stats = stats_len -
 				(WX_NUM_TX_QUEUES - wx->num_tx_queues) *
 				(sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
 	} else {
-		info->n_stats = WX_STATS_LEN;
+		info->n_stats = stats_len;
 	}
 }
 EXPORT_SYMBOL(wx_get_drvinfo);
@@ -383,6 +408,9 @@ void wx_get_channels(struct net_device *dev,
 	/* record RSS queues */
 	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
+
+	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+		ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
 }
 EXPORT_SYMBOL(wx_get_channels);
@@ -400,6 +428,9 @@ int wx_set_channels(struct net_device *dev,
 	if (count > wx_max_channels(wx))
 		return -EINVAL;
 
+	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+		wx->ring_feature[RING_F_FDIR].limit = count;
+
 	wx->ring_feature[RING_F_RSS].limit = count;
 
 	return 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1147,8 +1147,15 @@ static void wx_enable_rx(struct wx *wx)
 static void wx_set_rxpba(struct wx *wx)
 {
 	u32 rxpktsize, txpktsize, txpbthresh;
+	u32 pbsize = wx->mac.rx_pb_size;
 
-	rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
+	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
+		if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) ||
+		    test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+			pbsize -= 64; /* Default 64KB */
+	}
+
+	rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT;
 	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
 
 	/* Only support an equally distributed Tx packet buffer strategy. */
@@ -1261,7 +1268,7 @@ static void wx_configure_port(struct wx *wx)
  * Stops the receive data path and waits for the HW to internally empty
  * the Rx security block
  **/
-static int wx_disable_sec_rx_path(struct wx *wx)
+int wx_disable_sec_rx_path(struct wx *wx)
 {
 	u32 secrx;
 
@@ -1271,6 +1278,7 @@ static int wx_disable_sec_rx_path(struct wx *wx)
 	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
 				 1000, 40000, false, wx, WX_RSC_ST);
 }
+EXPORT_SYMBOL(wx_disable_sec_rx_path);
 
 /**
  * wx_enable_sec_rx_path - Enables the receive data path
@@ -1278,11 +1286,12 @@ static int wx_disable_sec_rx_path(struct wx *wx)
  *
  * Enables the receive data path.
  **/
-static void wx_enable_sec_rx_path(struct wx *wx)
+void wx_enable_sec_rx_path(struct wx *wx)
 {
 	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
 	WX_WRITE_FLUSH(wx);
 }
+EXPORT_SYMBOL(wx_enable_sec_rx_path);
 
 static void wx_vlan_strip_control(struct wx *wx, bool enable)
 {
@@ -1499,6 +1508,13 @@ static void wx_configure_tx_ring(struct wx *wx,
 	txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
 	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
 
+	ring->atr_count = 0;
+	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) &&
+	    test_bit(WX_FLAG_FDIR_HASH, wx->flags))
+		ring->atr_sample_rate = wx->atr_sample_rate;
+	else
+		ring->atr_sample_rate = 0;
+
 	/* reinitialize tx_buffer_info */
 	memset(ring->tx_buffer_info, 0,
 	       sizeof(struct wx_tx_buffer) * ring->count);
@@ -1732,7 +1748,9 @@ void wx_configure(struct wx *wx)
 	wx_set_rx_mode(wx->netdev);
 	wx_restore_vlan(wx);
 
-	wx_enable_sec_rx_path(wx);
+	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+		wx->configure_fdir(wx);
+
 	wx_configure_tx(wx);
 	wx_configure_rx(wx);
@@ -1959,6 +1977,7 @@ int wx_sw_init(struct wx *wx)
 	}
 
 	bitmap_zero(wx->state, WX_STATE_NBITS);
+	bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS);
 
 	return 0;
 }
@@ -2333,6 +2352,11 @@ void wx_update_stats(struct wx *wx)
 		hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
 		hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
 
+		if (wx->mac.type == wx_mac_sp) {
+			hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
+			hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
+		}
+
 		for (i = 0; i < wx->mac.max_rx_queues; i++)
 			hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
 	}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -28,6 +28,8 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
 void wx_flush_sw_mac_table(struct wx *wx);
 int wx_set_mac(struct net_device *netdev, void *p);
 void wx_disable_rx(struct wx *wx);
+int wx_disable_sec_rx_path(struct wx *wx);
+void wx_enable_sec_rx_path(struct wx *wx);
 void wx_set_rx_mode(struct net_device *netdev);
 int wx_change_mtu(struct net_device *netdev, int new_mtu);
 void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -148,10 +148,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = {
 	[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
 };
 
-static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
 {
 	return wx_ptype_lookup[ptype];
 }
+EXPORT_SYMBOL(wx_decode_ptype);
 
 /* wx_test_staterr - tests bits in Rx descriptor status and error fields */
 static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
@@ -1453,6 +1454,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
 static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
 				      struct wx_ring *tx_ring)
 {
+	struct wx *wx = netdev_priv(tx_ring->netdev);
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	struct wx_tx_buffer *first;
 	u8 hdr_len = 0, ptype;
@@ -1498,6 +1500,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
 		goto out_drop;
 	else if (!tso)
 		wx_tx_csum(tx_ring, first, ptype);
+
+	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
+		wx->atr(tx_ring, first, ptype);
+
 	wx_tx_map(tx_ring, first, hdr_len);
 
 	return NETDEV_TX_OK;
@@ -1574,8 +1580,27 @@ static void wx_set_rss_queues(struct wx *wx)
 	f = &wx->ring_feature[RING_F_RSS];
 	f->indices = f->limit;
 
-	wx->num_rx_queues = f->limit;
-	wx->num_tx_queues = f->limit;
+	if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+		goto out;
+
+	clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+
+	/* Use Flow Director in addition to RSS to ensure the best
+	 * distribution of flows across cores, even when an FDIR flow
+	 * isn't matched.
+	 */
+	if (f->indices > 1) {
+		f = &wx->ring_feature[RING_F_FDIR];
+
+		f->indices = f->limit;
+
+		if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+			set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+	}
+
+out:
+	wx->num_rx_queues = f->indices;
+	wx->num_tx_queues = f->indices;
 }
 
 static void wx_set_num_queues(struct wx *wx)
@@ -2680,6 +2705,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	netdev_features_t changed = netdev->features ^ features;
 	struct wx *wx = netdev_priv(netdev);
+	bool need_reset = false;
 
 	if (features & NETIF_F_RXHASH) {
 		wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
@@ -2697,6 +2723,36 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
 	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
 		wx_set_rx_mode(netdev);
 
+	if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+		return 0;
+
+	/* Check if Flow Director n-tuple support was enabled or disabled. If
+	 * the state changed, we need to reset.
+	 */
+	switch (features & NETIF_F_NTUPLE) {
+	case NETIF_F_NTUPLE:
+		/* turn off ATR, enable perfect filters and reset */
+		if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+			need_reset = true;
+
+		clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+		break;
+	default:
+		/* turn off perfect filters, enable ATR and reset */
+		if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+			need_reset = true;
+
+		/* We cannot enable ATR if RSS is disabled */
+		if (wx->ring_feature[RING_F_RSS].limit <= 1)
+			break;
+
+		set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+		break;
+	}
+
+	if (need_reset)
+		wx->do_reset(netdev);
+
 	return 0;
 }
 EXPORT_SYMBOL(wx_set_features);
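
In practice the switch statement above is driven by `ethtool -K <dev> ntuple on|off`: turning n-tuple on selects perfect filters and clears ATR hashing, turning it off re-enables ATR hashing (unless RSS is limited to a single queue), and either transition marks need_reset so the device is reconfigured through wx->do_reset().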
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -7,6 +7,7 @@
 #ifndef _WX_LIB_H_
 #define _WX_LIB_H_
 
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype);
 void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
 u16 wx_desc_unused(struct wx_ring *ring);
 netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -157,6 +157,8 @@
 #define WX_RDB_RA_CTL_RSS_IPV6_TCP   BIT(21)
 #define WX_RDB_RA_CTL_RSS_IPV4_UDP   BIT(22)
 #define WX_RDB_RA_CTL_RSS_IPV6_UDP   BIT(23)
+#define WX_RDB_FDIR_MATCH            0x19558
+#define WX_RDB_FDIR_MISS             0x1955C
 
 /******************************* PSR Registers *******************************/
 /* psr control */
@@ -503,6 +505,34 @@ enum WX_MSCA_CMD_value {
 #define WX_PTYPE_TYP_TCP             0x04
 #define WX_PTYPE_TYP_SCTP            0x05
 
+/* Packet type non-ip values */
+enum wx_l2_ptypes {
+	WX_PTYPE_L2_ABORTED = (WX_PTYPE_PKT_MAC),
+	WX_PTYPE_L2_MAC = (WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC),
+
+	WX_PTYPE_L2_IPV4_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG),
+	WX_PTYPE_L2_IPV4 = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IP),
+	WX_PTYPE_L2_IPV4_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_UDP),
+	WX_PTYPE_L2_IPV4_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP),
+	WX_PTYPE_L2_IPV4_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_SCTP),
+	WX_PTYPE_L2_IPV6_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+				 WX_PTYPE_TYP_IPFRAG),
+	WX_PTYPE_L2_IPV6 = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+			    WX_PTYPE_TYP_IP),
+	WX_PTYPE_L2_IPV6_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+				WX_PTYPE_TYP_UDP),
+	WX_PTYPE_L2_IPV6_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+				WX_PTYPE_TYP_TCP),
+	WX_PTYPE_L2_IPV6_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+				 WX_PTYPE_TYP_SCTP),
+
+	WX_PTYPE_L2_TUN4_MAC = (WX_PTYPE_TUN_IPV4 | WX_PTYPE_PKT_IGM),
+	WX_PTYPE_L2_TUN6_MAC = (WX_PTYPE_TUN_IPV6 | WX_PTYPE_PKT_IGM),
+};
+
+#define WX_PTYPE_PKT(_pt)	((_pt) & 0x30)
+#define WX_PTYPE_TYPL4(_pt)	((_pt) & 0x07)
+
 #define WX_RXD_PKTTYPE(_rxd) \
 	((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
 #define WX_RXD_IPV6EX(_rxd) \
@@ -552,6 +582,9 @@ enum wx_tx_flags {
 	WX_TX_FLAGS_OUTER_IPV4  = 0x100,
 	WX_TX_FLAGS_LINKSEC	= 0x200,
 	WX_TX_FLAGS_IPSEC	= 0x400,
+
+	/* software defined flags */
+	WX_TX_FLAGS_SW_VLAN	= 0x40,
 };
 
 /* VLAN info */
@@ -900,7 +933,13 @@ struct wx_ring {
 	 */
 	u16 next_to_use;
 	u16 next_to_clean;
-	u16 next_to_alloc;
+	union {
+		u16 next_to_alloc;
+		struct {
+			u8 atr_sample_rate;
+			u8 atr_count;
+		};
+	};
 
 	struct wx_queue_stats stats;
 	struct u64_stats_sync syncp;
@@ -939,6 +978,7 @@ struct wx_ring_feature {
 enum wx_ring_f_enum {
 	RING_F_NONE = 0,
 	RING_F_RSS,
+	RING_F_FDIR,
 	RING_F_ARRAY_SIZE /* must be last in enum set */
 };
 
@@ -980,15 +1020,26 @@ struct wx_hw_stats {
 	u64 crcerrs;
 	u64 rlec;
 	u64 qmprc;
+	u64 fdirmatch;
+	u64 fdirmiss;
 };
 
 enum wx_state {
 	WX_STATE_RESETTING,
 	WX_STATE_NBITS, /* must be last */
 };
 
+enum wx_pf_flags {
+	WX_FLAG_FDIR_CAPABLE,
+	WX_FLAG_FDIR_HASH,
+	WX_FLAG_FDIR_PERFECT,
+	WX_PF_FLAGS_NBITS /* must be last */
+};
+
 struct wx {
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	DECLARE_BITMAP(state, WX_STATE_NBITS);
+	DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
 	void *priv;
 	u8 __iomem *hw_addr;
@@ -1077,6 +1128,9 @@ struct wx {
 	u64 hw_csum_rx_error;
 	u64 alloc_rx_buff_failed;
 
+	u32 atr_sample_rate;
+	void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
+	void (*configure_fdir)(struct wx *wx);
 	void (*do_reset)(struct net_device *netdev);
 };
......
...@@ -10,4 +10,5 @@ txgbe-objs := txgbe_main.o \ ...@@ -10,4 +10,5 @@ txgbe-objs := txgbe_main.o \
txgbe_hw.o \ txgbe_hw.o \
txgbe_phy.o \ txgbe_phy.o \
txgbe_irq.o \ txgbe_irq.o \
txgbe_fdir.o \
txgbe_ethtool.o txgbe_ethtool.o
[diff collapsed in the original page; from the Makefile above this is most likely the new drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c]
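
The collapsed txgbe_fdir.c holds the actual ATR sampling and filter-programming code, which is not reproduced here. As a rough, self-contained orientation, the sketch below shows how a transmit-side ATR hook of this kind typically behaves, reusing the atr_sample_rate/atr_count ring fields and the signature hash key added elsewhere in this series; the hash fold is modeled on the well-known ixgbe scheme and the helper names are invented for illustration, so do not read it as the driver's code:

	#include <stdint.h>
	#include <stdio.h>

	/* Minimal stand-ins for the driver types used in this series
	 * (fields trimmed to what the sketch needs). */
	struct wx_ring {
		uint8_t atr_sample_rate;	/* sample 1 in N packets; 0 = ATR off */
		uint8_t atr_count;		/* packets since the last sample */
		uint8_t queue_index;		/* ring the flow transmits on */
	};

	struct flow_tuple {
		uint32_t src_ip, dst_ip;	/* host order, for illustration */
		uint16_t src_port, dst_port;
	};

	/* ATR-style signature hash: each set key bit XORs in a shifted copy
	 * of the input word, folded down to the 15-bit TXGBE_ATR_HASH_MASK
	 * range. This mirrors the ixgbe scheme; txgbe_fdir.c may differ. */
	static uint32_t atr_hash_word(uint32_t input, uint32_t key)
	{
		uint32_t hash = 0;
		int bit;

		for (bit = 0; bit < 16; bit++)
			if (key & (1u << bit))
				hash ^= input >> bit;
		return hash & 0x7fff;		/* TXGBE_ATR_HASH_MASK */
	}

	/* Rate-limited sampling, as wired up in wx_xmit_frame_ring() above:
	 * only one packet in every atr_sample_rate gets hashed and pushed
	 * to the hardware as a signature filter. */
	static void atr_sample(struct wx_ring *ring, const struct flow_tuple *t)
	{
		uint32_t hash;

		if (!ring->atr_sample_rate)
			return;
		if (++ring->atr_count < ring->atr_sample_rate)
			return;
		ring->atr_count = 0;

		hash = atr_hash_word(t->src_ip ^ t->dst_ip, 0x174D3614);
		hash ^= atr_hash_word(((uint32_t)t->src_port << 16) | t->dst_port,
				      0x174D3614);
		/* Real code would write hash and ring->queue_index through
		 * TXGBE_RDB_FDIR_HASH/TXGBE_RDB_FDIR_CMD here. */
		printf("queue %u gets flow hash 0x%04x\n",
		       (unsigned)ring->queue_index, (unsigned)hash);
	}

	int main(void)
	{
		struct wx_ring ring = { .atr_sample_rate = 20, .queue_index = 2 };
		struct flow_tuple t = { 0xc0a80001, 0xc0a80002, 12345, 80 };
		int i;

		for (i = 0; i < 40; i++)	/* 40 packets -> 2 samples */
			atr_sample(&ring, &t);
		return 0;
	}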
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h
new file mode 100644
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_FDIR_H_
+#define _TXGBE_FDIR_H_
+
+void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
+				    union txgbe_atr_input *input_mask);
+void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
+int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask);
+int txgbe_fdir_write_perfect_filter(struct wx *wx,
+				    union txgbe_atr_input *input,
+				    u16 soft_id, u8 queue);
+int txgbe_fdir_erase_perfect_filter(struct wx *wx,
+				    union txgbe_atr_input *input,
+				    u16 soft_id);
+void txgbe_configure_fdir(struct wx *wx);
+void txgbe_fdir_filter_exit(struct wx *wx);
+
+#endif /* _TXGBE_FDIR_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -18,6 +18,7 @@
 #include "txgbe_hw.h"
 #include "txgbe_phy.h"
 #include "txgbe_irq.h"
+#include "txgbe_fdir.h"
 #include "txgbe_ethtool.h"
 
 char txgbe_driver_name[] = "txgbe";
@@ -257,6 +258,14 @@ static int txgbe_sw_init(struct wx *wx)
 						       num_online_cpus());
 	wx->rss_enabled = true;
 
+	wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES,
+						    num_online_cpus());
+	set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags);
+	set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+	wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE;
+	wx->atr = txgbe_atr;
+	wx->configure_fdir = txgbe_configure_fdir;
+
 	/* enable itr by default in dynamic mode */
 	wx->rx_itr_setting = 1;
 	wx->tx_itr_setting = 1;
@@ -274,6 +283,12 @@ static int txgbe_sw_init(struct wx *wx)
 	return 0;
 }
 
+static void txgbe_init_fdir(struct txgbe *txgbe)
+{
+	txgbe->fdir_filter_count = 0;
+	spin_lock_init(&txgbe->fdir_perfect_lock);
+}
+
 /**
  * txgbe_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -352,6 +367,7 @@ static int txgbe_close(struct net_device *netdev)
 	txgbe_down(wx);
 	wx_free_irq(wx);
 	wx_free_resources(wx);
+	txgbe_fdir_filter_exit(wx);
 	wx_control_hw(wx, false);
 
 	return 0;
@@ -660,6 +676,8 @@ static int txgbe_probe(struct pci_dev *pdev,
 	txgbe->wx = wx;
 	wx->priv = txgbe;
 
+	txgbe_init_fdir(txgbe);
+
 	err = txgbe_setup_misc_irq(txgbe);
 	if (err)
 		goto err_release_hw;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -89,6 +89,55 @@
 #define TXGBE_XPCS_IDA_ADDR                     0x13000
 #define TXGBE_XPCS_IDA_DATA                     0x13004
 
+/********************************* Flow Director *****************************/
+#define TXGBE_RDB_FDIR_DROP_QUEUE               127
+#define TXGBE_RDB_FDIR_CTL                      0x19500
+#define TXGBE_RDB_FDIR_CTL_INIT_DONE            BIT(3)
+#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH        BIT(4)
+#define TXGBE_RDB_FDIR_CTL_DROP_Q(v)            FIELD_PREP(GENMASK(14, 8), v)
+#define TXGBE_RDB_FDIR_CTL_HASH_BITS(v)         FIELD_PREP(GENMASK(23, 20), v)
+#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH(v)        FIELD_PREP(GENMASK(27, 24), v)
+#define TXGBE_RDB_FDIR_CTL_FULL_THRESH(v)       FIELD_PREP(GENMASK(31, 28), v)
+#define TXGBE_RDB_FDIR_IP6(_i)                  (0x1950C + ((_i) * 4)) /* 0-2 */
+#define TXGBE_RDB_FDIR_SA                       0x19518
+#define TXGBE_RDB_FDIR_DA                       0x1951C
+#define TXGBE_RDB_FDIR_PORT                     0x19520
+#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT   16
+#define TXGBE_RDB_FDIR_FLEX                     0x19524
+#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT          16
+#define TXGBE_RDB_FDIR_HASH                     0x19528
+#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(v)     FIELD_PREP(GENMASK(31, 16), v)
+#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID        BIT(15)
+#define TXGBE_RDB_FDIR_CMD                      0x1952C
+#define TXGBE_RDB_FDIR_CMD_CMD_MASK             GENMASK(1, 0)
+#define TXGBE_RDB_FDIR_CMD_CMD(v)               FIELD_PREP(GENMASK(1, 0), v)
+#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW         TXGBE_RDB_FDIR_CMD_CMD(1)
+#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW      TXGBE_RDB_FDIR_CMD_CMD(2)
+#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT   TXGBE_RDB_FDIR_CMD_CMD(3)
+#define TXGBE_RDB_FDIR_CMD_FILTER_VALID         BIT(2)
+#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE        BIT(3)
+#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE(v)         FIELD_PREP(GENMASK(6, 5), v)
+#define TXGBE_RDB_FDIR_CMD_DROP                 BIT(9)
+#define TXGBE_RDB_FDIR_CMD_LAST                 BIT(11)
+#define TXGBE_RDB_FDIR_CMD_QUEUE_EN             BIT(15)
+#define TXGBE_RDB_FDIR_CMD_RX_QUEUE(v)          FIELD_PREP(GENMASK(22, 16), v)
+#define TXGBE_RDB_FDIR_CMD_VT_POOL(v)           FIELD_PREP(GENMASK(29, 24), v)
+#define TXGBE_RDB_FDIR_DA4_MSK                  0x1953C
+#define TXGBE_RDB_FDIR_SA4_MSK                  0x19540
+#define TXGBE_RDB_FDIR_TCP_MSK                  0x19544
+#define TXGBE_RDB_FDIR_UDP_MSK                  0x19548
+#define TXGBE_RDB_FDIR_SCTP_MSK                 0x19560
+#define TXGBE_RDB_FDIR_HKEY                     0x19568
+#define TXGBE_RDB_FDIR_SKEY                     0x1956C
+#define TXGBE_RDB_FDIR_OTHER_MSK                0x19570
+#define TXGBE_RDB_FDIR_OTHER_MSK_POOL           BIT(2)
+#define TXGBE_RDB_FDIR_OTHER_MSK_L4P            BIT(3)
+#define TXGBE_RDB_FDIR_FLEX_CFG(_i)             (0x19580 + ((_i) * 4))
+#define TXGBE_RDB_FDIR_FLEX_CFG_FIELD0          GENMASK(7, 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC        FIELD_PREP(GENMASK(1, 0), 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_MSK             BIT(2)
+#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v)         FIELD_PREP(GENMASK(7, 3), v)
+
 /* Checksum and EEPROM pointers */
 #define TXGBE_EEPROM_LAST_WORD                  0x800
 #define TXGBE_EEPROM_CHECKSUM                   0x2F
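
The register fields above are all declared through the kernel's GENMASK()/FIELD_PREP() helpers. A standalone illustration of how such a macro composes a control-register value, with userspace stand-ins for the kernel helpers and a made-up hash-bits value (not taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel's GENMASK()/FIELD_PREP() */
	#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

	#define FDIR_CTL_DROP_Q(v)    FIELD_PREP(GENMASK(14, 8), v)  /* bits 14:8 */
	#define FDIR_CTL_HASH_BITS(v) FIELD_PREP(GENMASK(23, 20), v) /* bits 23:20 */

	int main(void)
	{
		/* route filter drops to queue 127, hypothetical 13 hash bits */
		uint32_t ctl = FDIR_CTL_DROP_Q(127) | FDIR_CTL_HASH_BITS(13);

		printf("TXGBE_RDB_FDIR_CTL <- 0x%08x\n", ctl);	/* 0x00d07f00 */
		return 0;
	}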
@@ -112,6 +161,98 @@
 #define TXGBE_SP_RX_PB_SIZE                     512
 #define TXGBE_SP_TDB_PB_SZ                      (160 * 1024) /* 160KB Packet Buffer */
 
+#define TXGBE_DEFAULT_ATR_SAMPLE_RATE           20
+
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY               0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY            0x174D3614
+
+/* Software ATR input stream values and masks */
+#define TXGBE_ATR_HASH_MASK                     0x7fff
+#define TXGBE_ATR_L4TYPE_MASK                   0x3
+#define TXGBE_ATR_L4TYPE_UDP                    0x1
+#define TXGBE_ATR_L4TYPE_TCP                    0x2
+#define TXGBE_ATR_L4TYPE_SCTP                   0x3
+#define TXGBE_ATR_L4TYPE_IPV6_MASK              0x4
+#define TXGBE_ATR_L4TYPE_TUNNEL_MASK            0x10
+
+enum txgbe_atr_flow_type {
+	TXGBE_ATR_FLOW_TYPE_IPV4                = 0x0,
+	TXGBE_ATR_FLOW_TYPE_UDPV4               = 0x1,
+	TXGBE_ATR_FLOW_TYPE_TCPV4               = 0x2,
+	TXGBE_ATR_FLOW_TYPE_SCTPV4              = 0x3,
+	TXGBE_ATR_FLOW_TYPE_IPV6                = 0x4,
+	TXGBE_ATR_FLOW_TYPE_UDPV6               = 0x5,
+	TXGBE_ATR_FLOW_TYPE_TCPV6               = 0x6,
+	TXGBE_ATR_FLOW_TYPE_SCTPV6              = 0x7,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4       = 0x10,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4      = 0x11,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4      = 0x12,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4     = 0x13,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6       = 0x14,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6      = 0x15,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6      = 0x16,
+	TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6     = 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union txgbe_atr_input {
+	/* Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool    - 1 byte
+	 * flow_type  - 1 byte
+	 * vlan_id    - 2 bytes
+	 * dst_ip     - 16 bytes
+	 * src_ip     - 16 bytes
+	 * src_port   - 2 bytes
+	 * dst_port   - 2 bytes
+	 * flex_bytes - 2 bytes
+	 * bkt_hash   - 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	__be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union txgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum txgbe_fdir_pballoc_type {
+	TXGBE_FDIR_PBALLOC_NONE = 0,
+	TXGBE_FDIR_PBALLOC_64K  = 1,
+	TXGBE_FDIR_PBALLOC_128K = 2,
+	TXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+struct txgbe_fdir_filter {
+	struct hlist_node fdir_node;
+	union txgbe_atr_input filter;
+	u16 sw_idx;
+	u16 action;
+};
+
 /* TX/RX descriptor defines */
 #define TXGBE_DEFAULT_TXD                       512
 #define TXGBE_DEFAULT_TX_WORK                   256
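
A detail worth noting in union txgbe_atr_input: the formatted fields add up to exactly 44 bytes (1 + 1 + 2 + 16 + 16 + 2 + 2 + 2 + 2), which is why the alternate dword_stream view has 11 elements; hashing code can then walk the whole tuple dword by dword. A standalone re-creation with fixed-width userspace types (the kernel type uses u8/__be16/__be32) that checks this at compile time:

	#include <assert.h>
	#include <stdint.h>

	union atr_input_sketch {
		struct {
			uint8_t  vm_pool;
			uint8_t  flow_type;
			uint16_t vlan_id;
			uint32_t dst_ip[4];
			uint32_t src_ip[4];
			uint16_t src_port;
			uint16_t dst_port;
			uint16_t flex_bytes;
			uint16_t bkt_hash;
		} formatted;
		uint32_t dword_stream[11];
	};

	/* 44 bytes either way; a mismatch would break dword-wise hashing */
	static_assert(sizeof(union atr_input_sketch) == 11 * sizeof(uint32_t),
		      "formatted view and dword_stream view must coincide");

	int main(void)
	{
		return 0;
	}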
@@ -196,6 +337,12 @@ struct txgbe {
 	struct gpio_chip *gpio;
 	unsigned int gpio_irq;
 	unsigned int link_irq;
+
+	/* flow director */
+	struct hlist_head fdir_filter_list;
+	union txgbe_atr_input fdir_mask;
+	int fdir_filter_count;
+	spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
 };
 
 #endif /* _TXGBE_TYPE_H_ */