Commit b14a260e authored by David S. Miller

Merge branch 'nfp-tls-fixes-for-initial-TLS-support'

Jakub Kicinski says:

====================
nfp: tls: fixes for initial TLS support

This series brings various fixes to the nfp TLS offload code
recently added to net-next.

The first 4 patches revolve around device mailbox communication,
making it more reliable. The next patch fixes a statistics counter.
Patch 6 improves the TX resync behaviour when device communication
fails. Patch 7 makes sure we remove keys from memory after talking
to FW. Patch 8 adds missing TLS context initialization; we fill in
the context information from various places based on the
configuration, and it looks like we missed the init in the case
where TX is offloaded but RX wasn't initialized yet. Patches 9
and 10 make the nfp driver undo TLS state changes if we need to
drop the frame (e.g. due to a DMA mapping error).

Last but not least, TLS fallback should not adjust socket memory
accounting after skb_orphan_partial(). This code will go away once
we forbid orphaning of skbs in need of crypto, but that's "real"
-next material, so let's do a quick fix.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3cab2afb 5c4b4608
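
The last paragraph of the cover letter is easiest to see in code. Below is a
minimal, illustrative sketch (not the patch itself; the helper name is made
up) of the accounting rule the fallback fix enforces: once an skb has been
through skb_orphan_partial() its destructor is sock_efree and it no longer
charges its full truesize to sk->sk_wmem_alloc, so the truesize delta between
the plaintext skb and its encrypted copy must not be applied to the socket.

	/* Sketch only -- mirrors the idea of the tls_device_fallback.c hunk
	 * at the end of this diff; kernel context (sock_efree,
	 * refcount_sub_and_test, sk->sk_wmem_alloc) is assumed.
	 */
	static void tls_fallback_adjust_wmem(struct sock *sk,
					     struct sk_buff *skb,
					     struct sk_buff *nskb)
	{
		int delta;

		/* Orphaned skb: no truesize charge left on the socket, so
		 * adjusting sk_wmem_alloc here would corrupt accounting.
		 */
		if (nskb->destructor == sock_efree)
			return;

		/* Still owned by the socket: transfer the size difference of
		 * the encrypted copy to the socket's write-memory accounting.
		 */
		delta = nskb->truesize - skb->truesize;
		if (delta < 0)
			WARN_ON_ONCE(refcount_sub_and_test(-delta,
							   &sk->sk_wmem_alloc));
		else if (delta)
			refcount_add(delta, &sk->sk_wmem_alloc);
	}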
@@ -160,9 +160,9 @@ static void mlx5e_tls_del(struct net_device *netdev,
 			direction == TLS_OFFLOAD_CTX_DIR_TX);
 }
 
-static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
-			     u32 seq, u8 *rcd_sn_data,
-			     enum tls_offload_ctx_dir direction)
+static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
+			    u32 seq, u8 *rcd_sn_data,
+			    enum tls_offload_ctx_dir direction)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -177,6 +177,8 @@ static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
 			    be64_to_cpu(rcd_sn));
 	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
 	atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
+
+	return 0;
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
...
@@ -118,6 +118,10 @@ bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
 struct sk_buff *
 nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
 		       unsigned int reply_size, gfp_t flags);
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+			       enum nfp_ccm_type type,
+			       unsigned int reply_size,
+			       unsigned int max_reply_size, bool critical);
 int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
 			     enum nfp_ccm_type type,
 			     unsigned int reply_size,
...
@@ -13,7 +13,7 @@
  * form a batch. Threads come in with CMSG formed in an skb, then
  * enqueue that skb onto the request queue. If threads skb is first
  * in queue this thread will handle the mailbox operation. It copies
- * up to 16 messages into the mailbox (making sure that both requests
+ * up to 64 messages into the mailbox (making sure that both requests
  * and replies will fit. After FW is done processing the batch it
  * copies the data out and wakes waiting threads.
  * If a thread is waiting it either gets its the message completed
@@ -23,9 +23,9 @@
  * to limit potential cache line bounces.
  */
 
-#define NFP_CCM_MBOX_BATCH_LIMIT	16
+#define NFP_CCM_MBOX_BATCH_LIMIT	64
 #define NFP_CCM_TIMEOUT			(NFP_NET_POLL_TIMEOUT * 1000)
-#define NFP_CCM_MAX_QLEN		256
+#define NFP_CCM_MAX_QLEN		1024
 
 enum nfp_net_mbox_cmsg_state {
 	NFP_NET_MBOX_CMSG_STATE_QUEUED,
@@ -515,13 +515,13 @@ nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
 
 static int
 nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
-			 enum nfp_ccm_type type)
+			 enum nfp_ccm_type type, bool critical)
 {
 	struct nfp_ccm_hdr *hdr;
 
 	assert_spin_locked(&nn->mbox_cmsg.queue.lock);
 
-	if (nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
+	if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
 		nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
 		return -EBUSY;
 	}
@@ -536,10 +536,10 @@ nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
 	return 0;
 }
 
-int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
-			     enum nfp_ccm_type type,
-			     unsigned int reply_size,
-			     unsigned int max_reply_size)
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+			       enum nfp_ccm_type type,
+			       unsigned int reply_size,
+			       unsigned int max_reply_size, bool critical)
 {
 	int err;
@@ -550,7 +550,7 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
 
-	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
+	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
 	if (err)
 		goto err_unlock;
@@ -594,6 +594,15 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
 	return err;
 }
 
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+			     enum nfp_ccm_type type,
+			     unsigned int reply_size,
+			     unsigned int max_reply_size)
+{
+	return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
+					  max_reply_size, false);
+}
+
 static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
 {
 	struct sk_buff *skb;
@@ -650,7 +659,7 @@ int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
 
-	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
+	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
 	if (err)
 		goto err_unlock;
...
@@ -31,6 +31,8 @@ struct nfp_crypto_req_add_front {
 	u8 key_len;
 	__be16 ipver_vlan __packed;
 	u8 l4_proto;
+#define NFP_NET_TLS_NON_ADDR_KEY_LEN	8
+	u8 l3_addrs[0];
 };
 
 struct nfp_crypto_req_add_back {
...
@@ -4,6 +4,7 @@
 #include <linux/bitfield.h>
 #include <linux/ipv6.h>
 #include <linux/skbuff.h>
+#include <linux/string.h>
 #include <net/tls.h>
 
 #include "../ccm.h"
@@ -112,8 +113,9 @@ nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
 	struct nfp_crypto_reply_simple *reply;
 	int err;
 
-	err = nfp_ccm_mbox_communicate(nn, skb, type,
-				       sizeof(*reply), sizeof(*reply));
+	err = __nfp_ccm_mbox_communicate(nn, skb, type,
+					 sizeof(*reply), sizeof(*reply),
+					 type == NFP_CCM_TYPE_CRYPTO_DEL);
 	if (err) {
 		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
 		return err;
@@ -146,20 +148,38 @@ static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
 				       NFP_CCM_TYPE_CRYPTO_DEL);
 }
 
+static void
+nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
+{
+	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
+					FIELD_PREP(NFP_NET_TLS_VLAN,
+						   NFP_NET_TLS_VLAN_UNUSED));
+}
+
+static void
+nfp_net_tls_assign_conn_id(struct nfp_net *nn,
+			   struct nfp_crypto_req_add_front *front)
+{
+	u32 len;
+	u64 id;
+
+	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
+	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
+
+	memcpy(front->l3_addrs, &id, sizeof(id));
+	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
+}
+
 static struct nfp_crypto_req_add_back *
-nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
-		     int direction)
+nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
+		     struct sock *sk, int direction)
 {
 	struct inet_sock *inet = inet_sk(sk);
 
 	req->front.key_len += sizeof(__be32) * 2;
-	req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) |
-					    FIELD_PREP(NFP_NET_TLS_VLAN,
-						       NFP_NET_TLS_VLAN_UNUSED));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		req->src_ip = inet->inet_saddr;
-		req->dst_ip = inet->inet_daddr;
+		nfp_net_tls_assign_conn_id(nn, &req->front);
 	} else {
 		req->src_ip = inet->inet_daddr;
 		req->dst_ip = inet->inet_saddr;
@@ -169,20 +189,16 @@ nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
 }
 
 static struct nfp_crypto_req_add_back *
-nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk,
-		     int direction)
+nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
+		     struct sock *sk, int direction)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6_pinfo *np = inet6_sk(sk);
 
 	req->front.key_len += sizeof(struct in6_addr) * 2;
-	req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 6) |
-					    FIELD_PREP(NFP_NET_TLS_VLAN,
-						       NFP_NET_TLS_VLAN_UNUSED));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip));
-		memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip));
+		nfp_net_tls_assign_conn_id(nn, &req->front);
 	} else {
 		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
 		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
@@ -202,8 +218,8 @@ nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
 	front->l4_proto = IPPROTO_TCP;
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		back->src_port = inet->inet_sport;
-		back->dst_port = inet->inet_dport;
+		back->src_port = 0;
+		back->dst_port = 0;
 	} else {
 		back->src_port = inet->inet_dport;
 		back->dst_port = inet->inet_sport;
@@ -257,6 +273,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
 	struct nfp_crypto_reply_add *reply;
 	struct sk_buff *skb;
 	size_t req_sz;
+	void *req;
 	bool ipv6;
 	int err;
@@ -299,14 +316,17 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
 	front = (void *)skb->data;
 	front->ep_id = 0;
-	front->key_len = 8;
+	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
 	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
 	memset(front->resv, 0, sizeof(front->resv));
 
+	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
+
+	req = (void *)skb->data;
 	if (ipv6)
-		back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction);
+		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
 	else
-		back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction);
+		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
 
 	nfp_net_tls_set_l4(front, back, sk, direction);
@@ -321,15 +341,29 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
 	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
 
+	/* Get an extra ref on the skb so we can wipe the key after */
+	skb_get(skb);
+
 	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
 				       sizeof(*reply), sizeof(*reply));
+	reply = (void *)skb->data;
+
+	/* We depend on CCM MBOX code not reallocating skb we sent
+	 * so we can clear the key material out of the memory.
+	 */
+	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
+			  (u8 *)back > skb_end_pointer(skb)) &&
+	    !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
+		memzero_explicit(back, sizeof(*back));
+	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
+
 	if (err) {
-		nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err);
+		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
+			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
 		/* communicate frees skb on error */
 		goto err_conn_remove;
 	}
 
-	reply = (void *)skb->data;
 	err = -be32_to_cpu(reply->error);
 	if (err) {
 		if (err == -ENOSPC) {
@@ -383,7 +417,7 @@ nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
 	nfp_net_tls_del_fw(nn, ntls->fw_handle);
 }
 
-static void
+static int
 nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
 {
@@ -392,11 +426,12 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	struct nfp_crypto_req_update *req;
 	struct sk_buff *skb;
 	gfp_t flags;
+	int err;
 
 	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
 	if (!skb)
-		return;
+		return -ENOMEM;
 
 	ntls = tls_driver_ctx(sk, direction);
 	req = (void *)skb->data;
@@ -408,13 +443,17 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		nfp_net_tls_communicate_simple(nn, skb, "sync",
-					       NFP_CCM_TYPE_CRYPTO_UPDATE);
+		err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+						     NFP_CCM_TYPE_CRYPTO_UPDATE);
+		if (err)
+			return err;
 		ntls->next_seq = seq;
 	} else {
 		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
 				  sizeof(struct nfp_crypto_reply_simple));
 	}
+
+	return 0;
 }
 
 static const struct tlsdev_ops nfp_net_tls_ops = {
...
@@ -583,6 +583,7 @@ struct nfp_net_dp {
  * @tlv_caps:		Parsed TLV capabilities
  * @ktls_tx_conn_cnt:	Number of offloaded kTLS TX connections
  * @ktls_rx_conn_cnt:	Number of offloaded kTLS RX connections
+ * @ktls_conn_id_gen:	Trivial generator for kTLS connection ids (for TX)
  * @ktls_no_space:	Counter of firmware rejecting kTLS connection due to
  *			lack of space
  * @mbox_cmsg:		Common Control Message via vNIC mailbox state
@@ -670,6 +671,8 @@ struct nfp_net {
 	unsigned int ktls_tx_conn_cnt;
 	unsigned int ktls_rx_conn_cnt;
 
+	atomic64_t ktls_conn_id_gen;
+
 	atomic_t ktls_no_space;
 
 	struct {
...
@@ -822,11 +822,11 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
 	u64_stats_update_end(&r_vec->tx_sync);
 }
 
-#ifdef CONFIG_TLS_DEVICE
 static struct sk_buff *
 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
 {
+#ifdef CONFIG_TLS_DEVICE
 	struct nfp_net_tls_offload_ctx *ntls;
 	struct sk_buff *nskb;
 	bool resync_pending;
@@ -880,15 +880,40 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	if (datalen) {
 		u64_stats_update_begin(&r_vec->tx_sync);
-		r_vec->hw_tls_tx++;
+		if (!skb_is_gso(skb))
+			r_vec->hw_tls_tx++;
+		else
+			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
 		u64_stats_update_end(&r_vec->tx_sync);
 	}
 
 	memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
 	ntls->next_seq += datalen;
+#endif
 	return skb;
 }
 
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+{
+#ifdef CONFIG_TLS_DEVICE
+	struct nfp_net_tls_offload_ctx *ntls;
+	u32 datalen, seq;
+
+	if (!tls_handle)
+		return;
+	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+		return;
+
+	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	seq = ntohl(tcp_hdr(skb)->seq);
+
+	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+	if (ntls->next_seq == seq + datalen)
+		ntls->next_seq = seq;
+	else
+		WARN_ON_ONCE(1);
 #endif
+}
 
 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
 {
@@ -982,13 +1007,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-#ifdef CONFIG_TLS_DEVICE
 	skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
 	if (unlikely(!skb)) {
 		nfp_net_tx_xmit_more_flush(tx_ring);
 		return NETDEV_TX_OK;
 	}
-#endif
 
 	md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
 	if (unlikely(md_bytes < 0))
@@ -1101,6 +1124,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 		u64_stats_update_begin(&r_vec->tx_sync);
 		r_vec->tx_errors++;
 		u64_stats_update_end(&r_vec->tx_sync);
+		nfp_net_tls_tx_undo(skb, tls_handle);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
...
@@ -304,9 +304,9 @@ struct tlsdev_ops {
 	void (*tls_dev_del)(struct net_device *netdev,
 			    struct tls_context *ctx,
 			    enum tls_offload_ctx_dir direction);
-	void (*tls_dev_resync)(struct net_device *netdev,
-			       struct sock *sk, u32 seq, u8 *rcd_sn,
-			       enum tls_offload_ctx_dir direction);
+	int (*tls_dev_resync)(struct net_device *netdev,
+			      struct sock *sk, u32 seq, u8 *rcd_sn,
+			      enum tls_offload_ctx_dir direction);
 };
 
 enum tls_offload_sync_type {
...
@@ -214,6 +214,7 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 	struct net_device *netdev;
 	struct sk_buff *skb;
+	int err = 0;
 	u8 *rcd_sn;
 
 	skb = tcp_write_queue_tail(sk);
@@ -225,9 +226,12 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 	down_read(&device_offload_lock);
 	netdev = tls_ctx->netdev;
 	if (netdev)
-		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
-						   TLS_OFFLOAD_CTX_DIR_TX);
+		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
+							 rcd_sn,
+							 TLS_OFFLOAD_CTX_DIR_TX);
 	up_read(&device_offload_lock);
+	if (err)
+		return;
 
 	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 }
@@ -879,6 +883,8 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 		goto free_offload_ctx;
 	}
 
+	prot->version = crypto_info->version;
+	prot->cipher_type = crypto_info->cipher_type;
 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
 	prot->tag_size = tag_size;
 	prot->overhead_size = prot->prepend_size + prot->tag_size;
...
@@ -209,6 +209,10 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 
 	update_chksum(nskb, headln);
 
+	/* sock_efree means skb must gone through skb_orphan_partial() */
+	if (nskb->destructor == sock_efree)
+		return;
+
 	delta = nskb->truesize - skb->truesize;
 	if (likely(delta < 0))
 		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
...