Commit 7a8bca04 authored by David S. Miller

Merge branch 'sfc-tso-v2'

Edward Cree says:

====================
sfc: Firmware-Assisted TSO version 2

The firmware on 8000 series SFC NICs supports a new TSO API ("FATSOv2"), and
7000 series NICs will also support this in an imminent release.  This series
adds driver support for this TSO implementation.
The series also removes SWTSO, as it's now equivalent to GSO.  This does not
actually remove very much code, because SWTSO was deeply intertwined with
FATSOv1, which will also be removed once the 7000 series supports FATSOv2.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c7d03a00 46d1efd8
sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o tx_tso.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
...
@@ -2086,6 +2086,92 @@ static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
ER_DZ_TX_DESC_UPD, tx_queue->queue);
}
/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
*/
static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
struct sk_buff *skb,
bool *data_mapped)
{
struct efx_tx_buffer *buffer;
struct tcphdr *tcp;
struct iphdr *ip;
u16 ipv4_id;
u32 seqnum;
u32 mss;
EFX_BUG_ON_PARANOID(tx_queue->tso_version != 2);
mss = skb_shinfo(skb)->gso_size;
if (unlikely(mss < 4)) {
WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
return -EINVAL;
}
ip = ip_hdr(skb);
if (ip->version == 4) {
/* Modify IPv4 header if needed. */
ip->tot_len = 0;
ip->check = 0;
ipv4_id = ip->id;
} else {
/* Modify IPv6 header if needed. */
struct ipv6hdr *ipv6 = ipv6_hdr(skb);
ipv6->payload_len = 0;
ipv4_id = 0;
}
tcp = tcp_hdr(skb);
seqnum = ntohl(tcp->seq);
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
buffer->flags = EFX_TX_BUF_OPTION;
buffer->len = 0;
buffer->unmap_len = 0;
EFX_POPULATE_QWORD_5(buffer->option,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
ESF_DZ_TX_TSO_IP_ID, ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
);
++tx_queue->insert_count;
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
buffer->flags = EFX_TX_BUF_OPTION;
buffer->len = 0;
buffer->unmap_len = 0;
EFX_POPULATE_QWORD_4(buffer->option,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
ESF_DZ_TX_TSO_TCP_MSS, mss
);
++tx_queue->insert_count;
return 0;
}
static u32 efx_ef10_tso_versions(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 tso_versions = 0;
if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
tso_versions |= BIT(1);
if (nic_data->datapath_caps2 &
(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
tso_versions |= BIT(2);
return tso_versions;
}
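The mask returned here is consumed generically by the probe path (see the efx.c hunk below, which clears NETIF_F_ALL_TSO when no TSO version is reported).  As an aside, a minimal sketch of how a caller could test for FATSOv2 specifically, using only names introduced in this series; the helper itself is hypothetical and not part of the patch:

/* Hypothetical helper: true if the NIC reports firmware-assisted
 * TSO version 2 (bit 2 of the tso_versions mask).
 */
static bool example_nic_has_fatso_v2(struct efx_nic *efx)
{
	return efx->type->tso_versions &&
	       (efx->type->tso_versions(efx) & BIT(2));
}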
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
@@ -2095,6 +2181,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
bool tso_v2 = false;
size_t inlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
@@ -2102,13 +2189,21 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
int i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
*/
if (csum_offload && (nic_data->datapath_caps2 &
(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
}
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
@@ -2124,10 +2219,30 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc == -ENOSPC && tso_v2) {
/* Retry without TSOv2 if we're short on contexts. */
tso_v2 = false;
netif_warn(efx, probe, efx->net_dev,
"TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
} else if (rc) {
efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
MC_CMD_INIT_TXQ_EXT_IN_LEN,
NULL, 0, rc);
goto fail;
}
} while (rc);
/* A previous user of this TX queue might have set us up the
* bomb by writing a descriptor to the TX push collector but
@@ -2146,7 +2261,10 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
tx_queue->write_count = 1;
if (tso_v2) {
tx_queue->handle_tso = efx_ef10_tx_tso_desc;
tx_queue->tso_version = 2;
} else if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
tx_queue->tso_version = 1;
}
@@ -2202,6 +2320,25 @@ static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}
#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len)
{
if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
/* If we need to break across multiple descriptors we should
* stop at a page boundary. This assumes the length limit is
* greater than the page size.
*/
dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
}
return len;
}
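The generic mapping loop that calls this method lives in the collapsed tx.c changes; a rough sketch of the intended usage pattern, with a hypothetical helper name and the descriptor fill elided:

/* Illustrative only: split one DMA-mapped fragment into descriptors
 * no longer than the NIC allows, using the new tx_limit_len method.
 */
static void example_split_fragment(struct efx_tx_queue *tx_queue,
				   dma_addr_t dma_addr, unsigned int len)
{
	while (len) {
		unsigned int dma_len =
			tx_queue->efx->type->tx_limit_len(tx_queue,
							  dma_addr, len);

		/* ...fill one data descriptor covering
		 * [dma_addr, dma_addr + dma_len)...
		 */
		dma_addr += dma_len;
		len -= dma_len;
	}
}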
static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
unsigned int old_write_count = tx_queue->write_count;
@@ -5469,6 +5606,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
@@ -5575,6 +5713,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
@@ -5634,6 +5773,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
#endif
.get_mac_address = efx_ef10_get_mac_address_pf,
.set_mac_address = efx_ef10_set_mac_address,
.tso_versions = efx_ef10_tso_versions,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
...
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2012-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,14 @@
#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
#define ESF_DZ_RX_DROP_EVENT_LBN 58
#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
#define ESF_DD_RX_EV_RSVD2_LBN 54
#define ESF_DD_RX_EV_RSVD2_WIDTH 4
#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
#define ESF_EZ_RX_EV_RSVD2_LBN 54
#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
#define ESF_DZ_RX_EV_SOFT2_LBN 52
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
@@ -192,12 +198,21 @@
#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
#define ESE_DZ_MAC_CLASS_MCAST 1
#define ESE_DZ_MAC_CLASS_UCAST 0
#define ESF_DD_RX_EV_SOFT1_LBN 32
#define ESF_DD_RX_EV_SOFT1_WIDTH 3
#define ESF_EZ_RX_EV_SOFT1_LBN 34
#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
#define ESF_EZ_RX_ENCAP_HDR_LBN 32
#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
#define ESE_EZ_ENCAP_HDR_GRE 2
#define ESE_EZ_ENCAP_HDR_VXLAN 1
#define ESE_EZ_ENCAP_HDR_NONE 0
#define ESF_DD_RX_EV_RSVD1_LBN 30
#define ESF_DD_RX_EV_RSVD1_WIDTH 2
#define ESF_EZ_RX_EV_RSVD1_LBN 31
#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
#define ESF_EZ_RX_ABORT_LBN 30
#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
@@ -235,6 +250,12 @@
#define ESE_DZ_TX_OPTION_DESC_TSO 7
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
#define ESF_DZ_TX_TIMESTAMP_LBN 5
#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
@@ -257,14 +278,22 @@
#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
#define ESF_DZ_TX_DROP_EVENT_LBN 58
#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
#define ESF_DD_TX_EV_RSVD_LBN 48
#define ESF_DD_TX_EV_RSVD_WIDTH 10
#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
#define ESF_EZ_TX_EV_RSVD_LBN 48
#define ESF_EZ_TX_EV_RSVD_WIDTH 8
#define ESF_DZ_TX_SOFT2_LBN 32
#define ESF_DZ_TX_SOFT2_WIDTH 16
#define ESF_DD_TX_SOFT1_LBN 24
#define ESF_DD_TX_SOFT1_WIDTH 8
#define ESF_EZ_TX_CAN_MERGE_LBN 31
#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
#define ESF_EZ_TX_SOFT1_LBN 24
#define ESF_EZ_TX_SOFT1_WIDTH 7
#define ESF_DZ_TX_QLABEL_LBN 16
#define ESF_DZ_TX_QLABEL_WIDTH 5
#define ESF_DZ_TX_DESCR_INDX_LBN 0
@@ -301,6 +330,10 @@
#define ESE_DZ_TX_OPTION_DESC_TSO 7
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
#define ESF_DZ_TX_TSO_IP_ID_LBN 32
@@ -308,6 +341,46 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
/* TX_TSO_FATSO2A_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
#define ESE_DZ_TX_OPTION_DESC_TSO 7
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_IP_ID_LBN 32
#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
/* TX_TSO_FATSO2B_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
#define ESE_DZ_TX_OPTION_DESC_TSO 7
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
...
@@ -3200,23 +3200,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
@@ -3239,6 +3222,27 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail3;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
net_dev->hw_features = net_dev->features & ~efx->fixed_features;
/* Disable VLAN filtering by default. It may be enforced if
* the feature is fixed (i.e. VLAN filters are required to
* receive VLAN tagged packets due to vPort restrictions).
*/
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
rc = efx_register_netdev(efx);
if (rc)
goto fail4;
...
@@ -69,8 +69,10 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
...
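The new tso_fallbacks counter above counts packets the hardware could not segment and that were instead split in software via GSO, which is the replacement for the removed SWTSO path.  The actual fallback lives in the collapsed tx.c changes; a hedged sketch of the idea, with a hypothetical function name and the per-segment transmit elided:

/* Sketch only: let the GSO layer segment the skb, free the original,
 * and hand each resulting segment to the normal transmit path.
 */
static int example_tso_fallback(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	while (segments) {
		next = segments->next;
		segments->next = NULL;
		/* ...enqueue this segment as an ordinary packet... */
		segments = next;
	}

	++tx_queue->tso_fallbacks;
	return 0;
}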
@@ -2750,6 +2750,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
.rx_push_rss_config = dummy_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
@@ -2849,6 +2850,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
.rx_push_rss_config = falcon_b0_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
...
@@ -356,6 +356,21 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
}
}
unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len)
{
/* Don't cross 4K boundaries with descriptors. */
unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
len = min(limit, len);
if (EFX_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));
return len;
}
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
...
This diff is collapsed.
@@ -189,13 +189,17 @@ struct efx_tx_buffer {
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
* @cb_page: Array of pages of copy buffers.  Carved up according to
* %EFX_TX_CB_ORDER into %EFX_TX_CB_SIZE-sized chunks.
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @piobuf: PIO buffer region for this TX queue (shared with its partner).
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @tx_min_size: Minimum transmit size for this queue. Depends on HW.
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -221,9 +225,11 @@ struct efx_tx_buffer {
* @tso_long_headers: Number of packets with headers too long for standard
* blocks
* @tso_packets: Number of packets via the TSO xmit path
* @tso_fallbacks: Number of times TSO fallback used
* @pushes: Number of times the TX push feature has been used
* @pio_packets: Number of times the TX PIO feature has been used
* @xmit_more_available: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -236,12 +242,16 @@ struct efx_tx_queue {
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
struct efx_buffer *cb_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
void __iomem *piobuf;
unsigned int piobuf_offset;
bool initialised;
unsigned int tx_min_size;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
@@ -257,9 +267,11 @@ struct efx_tx_queue {
unsigned int tso_bursts;
unsigned int tso_long_headers;
unsigned int tso_packets;
unsigned int tso_fallbacks;
unsigned int pushes;
unsigned int pio_packets;
bool xmit_more_available;
unsigned int cb_packets;
/* Statistics to supplement MAC stats */
unsigned long tx_packets;
@@ -269,6 +281,9 @@ struct efx_tx_queue {
atomic_t flush_outstanding;
};
#define EFX_TX_CB_ORDER 7
#define EFX_TX_CB_SIZE (1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN
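The copy-buffer ("copybreak") users of cb_page are in the collapsed tx.c changes.  Purely as an illustration of the layout implied by the two macros above, one plausible way to map a ring index to its chunk; the helper name is hypothetical and 4 KiB pages with NET_IP_ALIGN padding are assumed:

/* Sketch: each cb_page page holds PAGE_SIZE >> EFX_TX_CB_ORDER chunks of
 * (1 << EFX_TX_CB_ORDER) bytes; NET_IP_ALIGN bytes at the start of each
 * chunk keep the copied payload IP-aligned, leaving EFX_TX_CB_SIZE usable.
 */
static u8 *example_tx_copy_buffer(struct efx_tx_queue *tx_queue,
				  unsigned int index)
{
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	return page_buf->addr ? (u8 *)page_buf->addr + offset : NULL;
}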
/**
* struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer
@@ -1212,6 +1227,8 @@ struct efx_mtd_partition {
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
* @set_mac_address: Set the MAC address of the device
* @tso_versions: Returns mask of firmware-assisted TSO versions supported.
* If %NULL, then device does not support any TSO version.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1288,6 +1305,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
@@ -1366,6 +1385,7 @@ struct efx_nic_type {
void (*vswitching_remove)(struct efx_nic *efx);
int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
int (*set_mac_address)(struct efx_nic *efx);
u32 (*tso_versions)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1545,4 +1565,32 @@ static inline netdev_features_t efx_supported_features(const struct efx_nic *efx
return net_dev->features | net_dev->hw_features;
}
/* Get the current TX queue insert index. */
static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
return tx_queue->insert_count & tx_queue->ptr_mask;
}
/* Get a TX buffer. */
static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}
/* Get a TX buffer, checking it's not currently in use. */
static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer =
__efx_tx_queue_get_insert_buffer(tx_queue);
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
return buffer;
}
#endif /* EFX_NET_DRIVER_H */
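The efx_tx_queue_get_insert_buffer() helper above is what the ef10 TSO path uses for its two FATSOv2 option descriptors.  Condensed to the bare pattern, with a hypothetical wrapper name (purely illustrative, not part of the patch):

/* Claim the next insert slot, describe it as an option descriptor
 * (no data, nothing to unmap), and advance the insert pointer.
 */
static void example_append_option_desc(struct efx_tx_queue *tx_queue,
				       efx_qword_t option)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);

	buffer->flags = EFX_TX_BUF_OPTION;
	buffer->len = 0;
	buffer->unmap_len = 0;
	buffer->option = option;
	++tx_queue->insert_count;
}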
@@ -681,6 +681,8 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
...
@@ -977,6 +977,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
.rx_push_rss_config = siena_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
...
This diff is collapsed.
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TX_H
#define EFX_TX_H
#include <linux/types.h>
/* Driver internal tx-path related declarations. */
unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, size_t len);
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
#endif /* EFX_TX_H */
This diff is collapsed.