Commit 020aa5c7 authored by David S. Miller

Merge branch 'nfp-tls-add-basic-TX-offload'

Jakub Kicinski says:

====================
nfp: tls: add basic TX offload

This series adds initial TLS offload support to the nfp driver.
Only TX side is added for now.  We need minor adjustments to
the core tls code:
 - expose the per-skb fallback helper;
 - grow the driver context slightly;
 - add a helper to get to the driver state more easily.
We only support TX offload for now, and only if all packets
keep coming in order.  For retransmissions we use the
aforementioned software fallback, and in case there are
local drops we completely give up on the given TCP stream.

This will obviously be improved soon; this patch set is the
minimal, functional, yet easily reviewable chunk.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ca72efb6 51a5e563
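The gist of the new TX path, condensed from the driver and core hunks below (error handling and statistics trimmed; an illustration, not the exact driver code):

/* Fetch the per-connection driver state the core now exposes.  If the
 * segment is the next one expected, let the NIC encrypt it and prepend
 * the firmware connection handle as TX metadata; otherwise encrypt this
 * skb on the host via the new per-skb fallback helper.
 */
struct nfp_net_tls_offload_ctx *ntls;

ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
if (ntls->next_seq != ntohl(tcp_hdr(skb)->seq))
	skb = tls_encrypt_skb(skb);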
@@ -20,6 +20,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
@@ -16,6 +16,7 @@ nfp-objs := \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
ccm.o \
ccm_mbox.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -34,6 +35,11 @@ nfp-objs := \
nfp_shared_buf.o \
nic/main.o
ifeq ($(CONFIG_TLS_DEVICE),y)
nfp-objs += \
crypto/tls.o
endif
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
@@ -7,9 +7,6 @@
#include "nfp_app.h"
#include "nfp_net.h"
#define NFP_CCM_TYPE_REPLY_BIT 7
#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
@@ -9,6 +9,7 @@
#include <linux/wait.h>
struct nfp_app;
struct nfp_net;
/* Firmware ABI */
@@ -21,15 +22,27 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
NFP_CCM_TYPE_CRYPTO_RESET = 9,
NFP_CCM_TYPE_CRYPTO_ADD = 10,
NFP_CCM_TYPE_CRYPTO_DEL = 11,
NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
__NFP_CCM_TYPE_MAX,
};
#define NFP_CCM_ABI_VERSION 1
#define NFP_CCM_TYPE_REPLY_BIT 7
#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
struct nfp_ccm_hdr {
union {
struct {
u8 type;
u8 ver;
__be16 tag;
};
__be32 raw;
};
};
static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
@@ -41,15 +54,31 @@ static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
return hdr->type;
}
static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb)
{
struct nfp_ccm_hdr *hdr;
hdr = (struct nfp_ccm_hdr *)skb->data;
return be16_to_cpu(hdr->tag);
return hdr->tag;
}
static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
{
return be16_to_cpu(__nfp_ccm_get_tag(skb));
}
#define NFP_NET_MBOX_TLV_TYPE GENMASK(31, 16)
#define NFP_NET_MBOX_TLV_LEN GENMASK(15, 0)
enum nfp_ccm_mbox_tlv_type {
NFP_NET_MBOX_TLV_TYPE_UNKNOWN = 0,
NFP_NET_MBOX_TLV_TYPE_END = 1,
NFP_NET_MBOX_TLV_TYPE_MSG = 2,
NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP = 3,
NFP_NET_MBOX_TLV_TYPE_RESV = 4,
};
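These mailbox control-message TLVs use a single 32-bit header word, with the type in bits 31:16 and the payload length in bits 15:0. A minimal illustration of composing and parsing it with the masks above (msg_len is a placeholder):

#include <linux/bitfield.h>

u32 hdr = FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, NFP_NET_MBOX_TLV_TYPE_MSG) |
	  FIELD_PREP(NFP_NET_MBOX_TLV_LEN, msg_len);

/* and on the way back out: */
u32 type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, hdr);
u32 len = FIELD_GET(NFP_NET_MBOX_TLV_LEN, hdr);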
/* Implementation */
/**
@@ -80,4 +109,13 @@ void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size);
bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
struct sk_buff *
nfp_ccm_mbox_alloc(struct nfp_net *nn, unsigned int req_size,
unsigned int reply_size, gfp_t flags);
int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
enum nfp_ccm_type type,
unsigned int reply_size,
unsigned int max_reply_size);
#endif
[diff collapsed]
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */
#ifndef NFP_CRYPTO_H
#define NFP_CRYPTO_H 1
struct nfp_net_tls_offload_ctx {
__be32 fw_handle[2];
u32 next_seq;
bool out_of_sync;
};
#ifdef CONFIG_TLS_DEVICE
int nfp_net_tls_init(struct nfp_net *nn);
#else
static inline int nfp_net_tls_init(struct nfp_net *nn)
{
return 0;
}
#endif
#endif
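struct nfp_net_tls_offload_ctx lives inside the driver_state area the TLS core reserves (see the net/tls changes at the end of this series), so it must fit within the 16 bytes now set aside for TX. A build-time assertion along these lines, placed in the connection-add path, would keep that honest (placement assumed):

BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
	     TLS_DRIVER_STATE_SIZE_TX);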
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */
#ifndef NFP_CRYPTO_FW_H
#define NFP_CRYPTO_FW_H 1
#include "../ccm.h"
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
struct nfp_crypto_reply_simple {
struct nfp_ccm_hdr hdr;
__be32 error;
};
struct nfp_crypto_req_reset {
struct nfp_ccm_hdr hdr;
__be32 ep_id;
};
#define NFP_NET_TLS_IPVER GENMASK(15, 12)
#define NFP_NET_TLS_VLAN GENMASK(11, 0)
#define NFP_NET_TLS_VLAN_UNUSED 4095
struct nfp_crypto_req_add_front {
struct nfp_ccm_hdr hdr;
__be32 ep_id;
u8 resv[3];
u8 opcode;
u8 key_len;
__be16 ipver_vlan __packed;
u8 l4_proto;
};
struct nfp_crypto_req_add_back {
__be16 src_port;
__be16 dst_port;
__be32 key[8];
__be32 salt;
__be32 iv[2];
__be32 counter;
__be32 rec_no[2];
__be32 tcp_seq;
};
struct nfp_crypto_req_add_v4 {
struct nfp_crypto_req_add_front front;
__be32 src_ip;
__be32 dst_ip;
struct nfp_crypto_req_add_back back;
};
struct nfp_crypto_req_add_v6 {
struct nfp_crypto_req_add_front front;
__be32 src_ip[4];
__be32 dst_ip[4];
struct nfp_crypto_req_add_back back;
};
struct nfp_crypto_reply_add {
struct nfp_ccm_hdr hdr;
__be32 error;
__be32 handle[2];
};
struct nfp_crypto_req_del {
struct nfp_ccm_hdr hdr;
__be32 ep_id;
__be32 handle[2];
};
struct nfp_crypto_req_update {
struct nfp_ccm_hdr hdr;
__be32 ep_id;
u8 resv[3];
u8 opcode;
__be32 handle[2];
__be32 rec_no[2];
__be32 tcp_seq;
};
#endif
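As an illustration of the bitfields above: ipver_vlan packs the IP version into the top nibble and the VLAN into the low 12 bits of one big-endian half-word. Filling the front part for an IPv4/TCP flow with no VLAN might look roughly like this (front being a struct nfp_crypto_req_add_front pointer; a sketch, not code from this series):

front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) |
				FIELD_PREP(NFP_NET_TLS_VLAN,
					   NFP_NET_TLS_VLAN_UNUSED));
front->l4_proto = IPPROTO_TCP;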
[diff collapsed]
@@ -12,11 +12,13 @@
#ifndef _NFP_NET_H_
#define _NFP_NET_H_
#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/semaphore.h>
#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -372,6 +374,11 @@ struct nfp_net_rx_ring {
* @hw_csum_tx_inner: Counter of inner TX checksum offload requests
* @tx_gather: Counter of packets with Gather DMA
* @tx_lso: Counter of LSO packets sent
* @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW
* @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted
* by the fallback path because packets came out of order
* @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback
* path could not encrypt them
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
@@ -409,21 +416,28 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_complete;
u64 hw_csum_rx_error;
u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
u64 tx_bytes;
u64 hw_csum_tx;
u64 ____cacheline_aligned_in_smp hw_csum_tx;
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
u64 hw_tls_tx;
u64 hw_csum_rx_error;
u64 rx_replace_buf_alloc_fail;
u64 tls_tx_fallback;
u64 tls_tx_no_fallback;
u64 tx_errors;
u64 tx_busy;
/* Cold data follows */
u32 irq_vector;
irq_handler_t handler;
char name[IFNAMSIZ + 8];
@@ -458,6 +472,7 @@ struct nfp_stat_pair {
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @chained_metadata_format: Firmware will use new metadata format
* @ktls_tx: Is kTLS TX enabled?
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which packet data DMA starts (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -482,6 +497,7 @@ struct nfp_net_dp {
u8 is_vf:1;
u8 chained_metadata_format:1;
u8 ktls_tx:1;
u8 rx_dma_dir;
u8 rx_offset;
@@ -549,7 +565,7 @@ struct nfp_net_dp {
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
* @bar_lock: vNIC config BAR access lock, protects: update,
* mailbox area
* mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -562,6 +578,13 @@ struct nfp_net_dp {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @tlv_caps: Parsed TLV capabilities
* @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
* @ktls_no_space: Counter of firmware rejecting kTLS connection due to
* lack of space
* @mbox_cmsg: Common Control Message via vNIC mailbox state
* @mbox_cmsg.queue: CCM mbox queue of pending messages
* @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
* @mbox_cmsg.tag: CCM mbox message tag allocator
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -620,7 +643,7 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
struct mutex bar_lock;
struct semaphore bar_lock;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -637,6 +660,16 @@ struct nfp_net {
struct nfp_net_tlv_caps tlv_caps;
unsigned int ktls_tx_conn_cnt;
atomic_t ktls_no_space;
struct {
struct sk_buff_head queue;
wait_queue_head_t wq;
u16 tag;
} mbox_cmsg;
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -848,12 +881,12 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
mutex_lock(&nn->bar_lock);
down(&nn->bar_lock);
}
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
mutex_unlock(&nn->bar_lock);
up(&nn->bar_lock);
}
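The mutex-to-semaphore conversion above (and the matching removal of the lockdep_assert_held() calls in nfp_net_common.c below; semaphores carry no lockdep annotation) is presumably needed because a mutex must be released by the task that acquired it, while the new mailbox code wants to take the BAR lock in the submitting context and release it from whichever context finishes the exchange. Schematically:

down(&nn->bar_lock);	/* taken in the submitting task */
/* ... queue mailbox message, sleep ... */
up(&nn->bar_lock);	/* may legally run in a different task */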
/* Globals */
@@ -883,6 +916,7 @@ void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
@@ -37,6 +36,7 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
#include <net/tls.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
@@ -45,6 +45,7 @@
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -271,12 +272,10 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
lockdep_assert_held(&nn->bar_lock);
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
@@ -331,7 +330,6 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
lockdep_assert_held(&nn->bar_lock);
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
@@ -804,6 +802,72 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
u64_stats_update_end(&r_vec->tx_sync);
}
#ifdef CONFIG_TLS_DEVICE
static struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
struct nfp_net_tls_offload_ctx *ntls;
struct sk_buff *nskb;
u32 datalen, seq;
if (likely(!dp->ktls_tx))
return skb;
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return skb;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
if (unlikely(ntls->next_seq != seq || ntls->out_of_sync)) {
/* Pure ACK out of order already */
if (!datalen)
return skb;
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tls_tx_fallback++;
u64_stats_update_end(&r_vec->tx_sync);
nskb = tls_encrypt_skb(skb);
if (!nskb) {
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tls_tx_no_fallback++;
u64_stats_update_end(&r_vec->tx_sync);
return NULL;
}
/* encryption wasn't necessary */
if (nskb == skb)
return skb;
/* we don't re-check ring space */
if (unlikely(skb_is_nonlinear(nskb))) {
nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
dev_kfree_skb_any(nskb);
return NULL;
}
/* jump forward, a TX may have gotten lost, need to sync TX */
if (!ntls->out_of_sync && seq - ntls->next_seq < U32_MAX / 4)
ntls->out_of_sync = true;
*nr_frags = 0;
return nskb;
}
if (datalen) {
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->hw_tls_tx++;
u64_stats_update_end(&r_vec->tx_sync);
}
memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
ntls->next_seq += datalen;
return skb;
}
#endif
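A note on the "seq - ntls->next_seq < U32_MAX / 4" test above: it is an unsigned, wraparound-safe comparison. A small positive difference means the segment starts ahead of the expected sequence, i.e. an earlier TX was dropped locally and the device state can no longer be kept in sync, so the stream is marked out_of_sync for good; older (retransmitted) data underflows to a huge value and only takes the one-off software fallback. Worked example:

u32 next_seq = 0x1000;

/* seq 0x2000: 0x2000 - 0x1000 = 0x1000     <  U32_MAX / 4 -> gap ahead,
 *                                             mark out_of_sync
 * seq 0x0800: 0x0800 - 0x1000 = 0xfffff800 >= U32_MAX / 4 -> retransmit,
 *                                             fall back for this skb only
 */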
static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb();
@@ -811,24 +875,47 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
static int nfp_net_prep_port_id(struct sk_buff *skb)
static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
u32 meta_id = 0;
int md_bytes;
if (likely(!md_dst))
if (likely(!md_dst && !tls_handle))
return 0;
if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
if (!tls_handle)
return 0;
md_dst = NULL;
}
md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
if (unlikely(skb_cow_head(skb, 8)))
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
data = skb_push(skb, 8);
put_unaligned_be32(NFP_NET_META_PORTID, data);
put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
meta_id = 0;
data = skb_push(skb, md_bytes) + md_bytes;
if (md_dst) {
data -= 4;
put_unaligned_be32(md_dst->u.port_info.port_id, data);
meta_id = NFP_NET_META_PORTID;
}
if (tls_handle) {
/* conn handle is opaque, we just use u64 to be able to quickly
* compare it to zero
*/
data -= 8;
memcpy(data, &tls_handle, sizeof(tls_handle));
meta_id <<= NFP_NET_META_FIELD_SIZE;
meta_id |= NFP_NET_META_CONN_HANDLE;
}
return 8;
data -= 4;
put_unaligned_be32(meta_id, data);
return md_bytes;
}
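For reference, with both a port id and a TLS connection handle present the prepend built above totals 16 bytes. Each field gets a 4-bit type tag in the leading word (NFP_NET_META_FIELD_SIZE is 4), and every newly added tag shifts the earlier ones one nibble up, so the tag in the lowest nibble describes the value placed immediately after the word:

/*
 * meta_id = (NFP_NET_META_PORTID << 4) | NFP_NET_META_CONN_HANDLE
 *
 *   [ meta_id word              ]  4 B
 *   [ TLS conn handle (opaque)  ]  8 B
 *   [ port id                   ]  4 B
 *   [ original packet data ...  ]
 */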
/**
@@ -851,6 +938,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
u64 tls_handle = 0;
u16 qidx;
dp = &nn->dp;
@@ -872,18 +960,23 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
md_bytes = nfp_net_prep_port_id(skb);
if (unlikely(md_bytes < 0)) {
#ifdef CONFIG_TLS_DEVICE
skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
if (unlikely(!skb)) {
nfp_net_tx_xmit_more_flush(tx_ring);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
#endif
md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
if (unlikely(md_bytes < 0))
goto err_flush;
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
goto err_dma_err;
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
@@ -979,8 +1072,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
err_dma_err:
nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_flush:
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
@@ -3704,13 +3798,16 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
mutex_init(&nn->bar_lock);
sema_init(&nn->bar_lock, 1);
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
skb_queue_head_init(&nn->mbox_cmsg.queue);
init_waitqueue_head(&nn->mbox_cmsg.wq);
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
@@ -3733,8 +3830,7 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
mutex_destroy(&nn->bar_lock);
WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -4009,9 +4105,14 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
if (nn->dp.netdev)
if (nn->dp.netdev) {
nfp_net_netdev_init(nn);
err = nfp_net_tls_init(nn);
if (err)
return err;
}
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
@@ -99,6 +99,21 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->repr_cap = readl(data);
break;
case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
if (length >= 4)
caps->mbox_cmsg_types = readl(data);
break;
case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
if (length < 32) {
dev_err(dev,
"CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
length, offset);
return -EINVAL;
}
caps->crypto_ops = readl(data);
caps->crypto_enable_off = data - ctrl_mem + 16;
break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
@@ -44,6 +44,7 @@
#define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
#define NFP_META_PORT_ID_CTRL ~0U
@@ -135,6 +136,7 @@
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */
#define NFP_NET_CFG_UPDATE_CRYPTO (0x1 << 14) /* Crypto on/off */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -394,6 +396,7 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
/**
* VLAN filtering using general use mailbox
@@ -466,6 +469,16 @@
* %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
* Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
* can be used on representors.
*
* %NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
* Variable, bitmap of control message types supported by the mailbox handler.
* Bit 0 corresponds to message type 0, bit 1 to 1, etc. Control messages are
* encapsulated into simple TLVs, terminated with an end TLV, and written to
* the Mailbox.
*
* %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
* 8 words, bitmaps of supported and enabled crypto operations.
* The first 16B (4 words) contain a bitmap of supported crypto operations,
* and the next 16B contain the enabled operations.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -475,6 +488,8 @@
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
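A hedged sketch of how a vNIC might act on this TLV once parsed (the real enable sequence lives in a collapsed diff, so the flow here is indicative only; nn_writel is the driver's existing config BAR write helper): check the supported bitmap, write the wanted bits into the enable region, then request a crypto reconfig:

int err;

if (nn->tlv_caps.crypto_ops & BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)) {
	nn_writel(nn, nn->tlv_caps.crypto_enable_off,
		  BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC));
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
}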
struct device;
@@ -484,12 +499,18 @@ struct device;
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
* @repr_cap: capabilities for representors
* @mbox_cmsg_types: cmsgs which can be passed through the mailbox
* @crypto_ops: supported crypto operations
* @crypto_enable_off: offset of crypto ops enable region
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
u32 repr_cap;
u32 mbox_cmsg_types;
u32 crypto_ops;
unsigned int crypto_enable_off;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
@@ -150,8 +150,9 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_GATHER_STATS 12
#define NN_RVEC_PER_Q_STATS 3
#define NN_CTRL_PATH_STATS 1
#define SFP_SFF_REV_COMPLIANCE 1
@@ -423,7 +424,8 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
NN_CTRL_PATH_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -446,6 +448,11 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
data = nfp_pr_et(data, "tx_tls_encrypted");
data = nfp_pr_et(data, "tx_tls_ooo");
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
data = nfp_pr_et(data, "hw_tls_no_space");
return data;
}
@@ -478,6 +485,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
tmp[7] = nn->r_vecs[i].tx_gather;
tmp[8] = nn->r_vecs[i].tx_lso;
tmp[9] = nn->r_vecs[i].hw_tls_tx;
tmp[10] = nn->r_vecs[i].tls_tx_fallback;
tmp[11] = nn->r_vecs[i].tls_tx_no_fallback;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -489,6 +499,8 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
*data++ = atomic_read(&nn->ktls_no_space);
return data;
}
@@ -40,6 +40,7 @@
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/netdevice.h>
#include <net/tcp.h>
#include <net/strparser.h>
@@ -197,17 +198,16 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
u8 driver_state[];
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
#define TLS_DRIVER_STATE_SIZE_TX 16
};
#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
struct cipher_context {
char *iv;
@@ -302,16 +302,16 @@ struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
atomic64_t resync_req;
u8 driver_state[];
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
#define TLS_DRIVER_STATE_SIZE_RX 8
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
@@ -556,6 +556,23 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
return tls_offload_ctx_tx(tls_ctx)->driver_state;
else
return tls_offload_ctx_rx(tls_ctx)->driver_state;
}
static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif
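With this, a driver declares its per-connection state and overlays it on driver_state, as nfp does above. A minimal hypothetical example (struct my_tls_ctx and start_seq are illustrative; the struct must fit TLS_DRIVER_STATE_SIZE_TX):

struct my_tls_ctx {
	u64 hw_handle;
	u32 next_seq;
};

struct my_tls_ctx *ctx = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);

ctx->next_seq = start_seq;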
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
@@ -573,6 +590,7 @@ void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
......
@@ -426,6 +426,12 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);
int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info)