Commit 6e16e67a authored by Jakub Kicinski

Merge branch 'nfp-fix-schedule-in-atomic-context-when-offloading-sa'

Simon Horman says:

====================
nfp: fix schedule in atomic context when offloading sa

Yinjun Zhang says:

IPsec offloading callbacks may be invoked in atomic context, so their
implementation is not allowed to sleep. Defer the configuration work to
a workqueue to avoid this issue.

Extend the existing workqueue mechanism, so far used only for multicast
configuration, into a generic one, so that any configuration issued
through the mailbox can be processed asynchronously (the pattern is
sketched just after the merge metadata below).

Also fix two other incorrect uses of the mailbox in IPsec (both visible
in the first ipsec.c hunk below, with a short illustration after it):
 1. A lock is needed to guard against races when accessing the mailbox.
 2. The offset of mailbox accesses should depend on the TLV caps rather
    than a fixed address.
====================

Link: https://lore.kernel.org/r/20230208102258.29639-1-simon.horman@corigine.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents d61615c3 71f814cd
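
The mechanism the cover letter describes reduces to the pattern below:
callers that may be in atomic context only allocate (GFP_ATOMIC) and
queue, while a work item later drains the queue in process context,
where the mailbox code is free to sleep. This is a minimal generic
sketch with illustrative names (amsg_ctx, amsg_entry and friends are
not the driver's identifiers; the real implementation is in the
nfp_net_common.c hunk further down):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct amsg_ctx {
        spinlock_t lock;                /* protects list */
        struct list_head list;          /* queued messages */
        struct work_struct work;        /* drains the list */
};

struct amsg_entry {
        struct list_head node;
        int (*run)(struct amsg_entry *entry);   /* called in process context */
        char payload[];                         /* copied message body */
};

/* Callable from atomic context: no sleeping, only allocate and queue. */
static int amsg_submit(struct amsg_ctx *ctx, const void *data, size_t len,
                       int (*run)(struct amsg_entry *))
{
        struct amsg_entry *e = kmalloc(sizeof(*e) + len, GFP_ATOMIC);

        if (!e)
                return -ENOMEM;
        memcpy(e->payload, data, len);
        e->run = run;

        spin_lock_bh(&ctx->lock);
        list_add_tail(&e->node, &ctx->list);
        spin_unlock_bh(&ctx->lock);

        schedule_work(&ctx->work);      /* the worker may sleep; we must not */
        return 0;
}

/* Work callback: splice the pending entries out under the lock, then
 * process them unlocked, where sleeping (e.g. mailbox waits) is fine.
 */
static void amsg_work_fn(struct work_struct *work)
{
        struct amsg_ctx *ctx = container_of(work, struct amsg_ctx, work);
        struct amsg_entry *e, *tmp;
        LIST_HEAD(pending);

        spin_lock_bh(&ctx->lock);
        list_splice_init(&ctx->list, &pending);
        spin_unlock_bh(&ctx->lock);

        list_for_each_entry_safe(e, tmp, &pending, node) {
                e->run(e);              /* process context: may sleep */
                list_del(&e->node);
                kfree(e);
        }
}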
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -129,26 +129,31 @@ struct nfp_ipsec_cfg_mssg {
         };
 };
 
-static int nfp_ipsec_cfg_cmd_issue(struct nfp_net *nn, int type, int saidx,
-                                   struct nfp_ipsec_cfg_mssg *msg)
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
 {
+        unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+        struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
         int i, msg_size, ret;
 
-        msg->cmd = type;
-        msg->sa_idx = saidx;
-        msg->rsp = 0;
-        msg_size = ARRAY_SIZE(msg->raw);
+        ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+        if (ret)
+                return ret;
 
+        msg_size = ARRAY_SIZE(msg->raw);
         for (i = 0; i < msg_size; i++)
-                nn_writel(nn, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+                nn_writel(nn, offset + 4 * i, msg->raw[i]);
 
-        ret = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_IPSEC);
-        if (ret < 0)
+        ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+        if (ret < 0) {
+                nn_ctrl_bar_unlock(nn);
                 return ret;
+        }
 
         /* For now we always read the whole message response back */
         for (i = 0; i < msg_size; i++)
-                msg->raw[i] = nn_readl(nn, NFP_NET_CFG_MBOX_VAL + 4 * i);
+                msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+        nn_ctrl_bar_unlock(nn);
 
         switch (msg->rsp) {
         case NFP_IPSEC_CFG_MSSG_OK:
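
Both mailbox fixes called out in the cover letter are visible in the
hunk above: every transfer is now bracketed by nfp_net_mbox_lock() and
nn_ctrl_bar_unlock(), including the early-return path when the reconfig
fails, and the register offset is computed from the TLV capabilities
instead of the fixed NFP_NET_CFG_MBOX_VAL. The same access pattern,
distilled into a standalone sketch (the function name is hypothetical;
the driver helpers it calls are real, and the response read-back is
omitted for brevity):

static int mbox_xfer_sketch(struct nfp_net *nn, const u32 *raw, int n, u32 cmd)
{
        unsigned int off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
        int i, ret;

        ret = nfp_net_mbox_lock(nn, n * sizeof(u32));   /* fix 1: serialize mbox users */
        if (ret)
                return ret;

        for (i = 0; i < n; i++)
                nn_writel(nn, off + 4 * i, raw[i]);     /* fix 2: TLV-derived offset */

        ret = nfp_net_mbox_reconfig(nn, cmd);
        nn_ctrl_bar_unlock(nn);                         /* released on every path */
        return ret < 0 ? ret : 0;
}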
@@ -477,7 +482,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
         }
 
         /* Allocate saidx and commit the SA */
-        err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_ADD_SA, saidx, &msg);
+        msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+        msg.sa_idx = saidx;
+        err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+                                           sizeof(msg), nfp_net_ipsec_cfg);
         if (err) {
                 xa_erase(&nn->xa_ipsec, saidx);
                 nn_err(nn, "Failed to issue IPsec command err ret=%d\n", err);
@@ -491,14 +499,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
 
 static void nfp_net_xfrm_del_state(struct xfrm_state *x)
 {
+        struct nfp_ipsec_cfg_mssg msg = {
+                .cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+                .sa_idx = x->xso.offload_handle - 1,
+        };
         struct net_device *netdev = x->xso.dev;
-        struct nfp_ipsec_cfg_mssg msg;
         struct nfp_net *nn;
         int err;
 
         nn = netdev_priv(netdev);
-        err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_INV_SA,
-                                      x->xso.offload_handle - 1, &msg);
+        err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+                                           sizeof(msg), nfp_net_ipsec_cfg);
         if (err)
                 nn_warn(nn, "Failed to invalidate SA in hardware\n");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -617,9 +617,10 @@ struct nfp_net_dp {
  * @vnic_no_name:       For non-port PF vNIC make ndo_get_phys_port_name return
  *                      -EOPNOTSUPP to keep backwards compatibility (set by app)
  * @port:               Pointer to nfp_port structure if vNIC is a port
- * @mc_lock:            Protect mc_addrs list
- * @mc_addrs:           List of mc addrs to add/del to HW
- * @mc_work:            Work to update mc addrs
+ * @mbox_amsg:          Asynchronously processed message via mailbox
+ * @mbox_amsg.lock:     Protect message list
+ * @mbox_amsg.list:     List of message to process
+ * @mbox_amsg.work:     Work to process message asynchronously
  * @app_priv:           APP private data for this vNIC
  */
 struct nfp_net {
@@ -721,13 +722,25 @@ struct nfp_net {
 
         struct nfp_port *port;
 
-        spinlock_t mc_lock;
-        struct list_head mc_addrs;
-        struct work_struct mc_work;
+        struct {
+                spinlock_t lock;
+                struct list_head list;
+                struct work_struct work;
+        } mbox_amsg;
 
         void *app_priv;
 };
 
+struct nfp_mbox_amsg_entry {
+        struct list_head list;
+        int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
+        u32 cmd;
+        char msg[];
+};
+
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+                                 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
+
 /* Functions to read/write from/to a BAR
  * Performs any endian conversion necessary.
  */
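With these declarations, any vNIC configuration path can funnel
messages through the mailbox asynchronously: the caller packages a
payload plus a command, and the cfg callback later runs the sleeping
mailbox I/O from the work item. A hypothetical caller might look like
this (example_* and do_mailbox_io() are illustrative names, not part of
the driver):

/* Runs later from the workqueue, in process context: locking, register
 * I/O and waiting are all safe here.
 */
static int example_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
        u32 *payload = (u32 *)entry->msg;

        return do_mailbox_io(nn, entry->cmd, payload);  /* hypothetical helper */
}

/* Safe from atomic context: copies the payload and returns immediately. */
static int example_submit(struct nfp_net *nn, u32 value)
{
        return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC,
                                            &value, sizeof(value), example_cfg);
}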
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1334,14 +1334,54 @@ int nfp_ctrl_open(struct nfp_net *nn)
         return err;
 }
 
-struct nfp_mc_addr_entry {
-        u8 addr[ETH_ALEN];
-        u32 cmd;
-        struct list_head list;
-};
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+                                 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
+{
+        struct nfp_mbox_amsg_entry *entry;
+
+        entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+        if (!entry)
+                return -ENOMEM;
+
+        memcpy(entry->msg, data, len);
+        entry->cmd = cmd;
+        entry->cfg = cb;
+
+        spin_lock_bh(&nn->mbox_amsg.lock);
+        list_add_tail(&entry->list, &nn->mbox_amsg.list);
+        spin_unlock_bh(&nn->mbox_amsg.lock);
+
+        schedule_work(&nn->mbox_amsg.work);
+
+        return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+        struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+        struct nfp_mbox_amsg_entry *entry, *tmp;
+        struct list_head tmp_list;
+
+        INIT_LIST_HEAD(&tmp_list);
+
+        spin_lock_bh(&nn->mbox_amsg.lock);
+        list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+        spin_unlock_bh(&nn->mbox_amsg.lock);
+
+        list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+                int err = entry->cfg(nn, entry);
+
+                if (err)
+                        nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+                list_del(&entry->list);
+                kfree(entry);
+        }
+}
 
-static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
 {
+        unsigned char *addr = entry->msg;
         int ret;
 
         ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
@@ -1353,26 +1393,7 @@ static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
         nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
                   get_unaligned_be16(addr + 4));
 
-        return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
-}
-
-static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
-{
-        struct nfp_mc_addr_entry *entry;
-
-        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-        if (!entry)
-                return -ENOMEM;
-
-        ether_addr_copy(entry->addr, addr);
-        entry->cmd = cmd;
-        spin_lock_bh(&nn->mc_lock);
-        list_add_tail(&entry->list, &nn->mc_addrs);
-        spin_unlock_bh(&nn->mc_lock);
-
-        schedule_work(&nn->mc_work);
-
-        return 0;
+        return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
 }
 
 static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
@@ -1385,35 +1406,16 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
                 return -EINVAL;
         }
 
-        return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+        return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+                                            NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
 }
 
 static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
 {
         struct nfp_net *nn = netdev_priv(netdev);
 
-        return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
-}
-
-static void nfp_net_mc_addr_config(struct work_struct *work)
-{
-        struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
-        struct nfp_mc_addr_entry *entry, *tmp;
-        struct list_head tmp_list;
-
-        INIT_LIST_HEAD(&tmp_list);
-
-        spin_lock_bh(&nn->mc_lock);
-        list_splice_init(&nn->mc_addrs, &tmp_list);
-        spin_unlock_bh(&nn->mc_lock);
-
-        list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
-                if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
-                        nn_err(nn, "Config mc address to HW failed.\n");
-
-                list_del(&entry->list);
-                kfree(entry);
-        }
+        return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+                                            NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
 }
 
 static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2681,9 +2683,9 @@ int nfp_net_init(struct nfp_net *nn)
         if (!nn->dp.netdev)
                 return 0;
 
-        spin_lock_init(&nn->mc_lock);
-        INIT_LIST_HEAD(&nn->mc_addrs);
-        INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+        spin_lock_init(&nn->mbox_amsg.lock);
+        INIT_LIST_HEAD(&nn->mbox_amsg.list);
+        INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
 
         return register_netdev(nn->dp.netdev);
@@ -2704,6 +2706,6 @@ void nfp_net_clean(struct nfp_net *nn)
         unregister_netdev(nn->dp.netdev);
         nfp_net_ipsec_clean(nn);
         nfp_ccm_mbox_clean(nn);
-        flush_work(&nn->mc_work);
+        flush_work(&nn->mbox_amsg.work);
         nfp_net_reconfig_wait_posted(nn);
 }
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -403,7 +403,6 @@
  */
 #define NFP_NET_CFG_MBOX_BASE           0x1800
 #define NFP_NET_CFG_MBOX_VAL_MAX_SZ     0x1F8
-#define NFP_NET_CFG_MBOX_VAL            0x1808
 #define NFP_NET_CFG_MBOX_SIMPLE_CMD     0x0
 #define NFP_NET_CFG_MBOX_SIMPLE_RET     0x4
 #define NFP_NET_CFG_MBOX_SIMPLE_VAL     0x8
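
The dropped NFP_NET_CFG_MBOX_VAL was simply the simple-mailbox value
offset pre-added to the default mailbox base, which only holds when the
firmware leaves the mailbox at that base; the TLV-aware code now
computes the same thing from tlv_caps.mbox_off. As a quick sanity check
(illustrative C11 assertion, not part of the patch):

_Static_assert(0x1800 + 0x8 == 0x1808,
               "MBOX_BASE + MBOX_SIMPLE_VAL == the removed MBOX_VAL");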