Commit 7c6327c7 authored by Paolo Abeni

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts:

net/ax25/af_ax25.c
  d7c4c9e0 ("ax25: fix incorrect dev_tracker usage")
  d62607c3 ("net: rename reference+tracking helpers")

drivers/net/netdevsim/fib.c
  180a6a3e ("netdevsim: fib: Fix reference count leak on route deletion failure")
  012ec02a ("netdevsim: convert driver to use unlocked devlink API during init/fini")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 6f63d044 8eaa1d11
@@ -92,6 +92,7 @@ struct iavf_vsi {
 #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
 #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
 #define IAVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA	50
 #define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
					(IAVF_MAX_VF_VSI * \
@@ -433,6 +434,11 @@ struct iavf_adapter {
 	/* lock to protect access to the cloud filter list */
 	spinlock_t cloud_filter_list_lock;
 	u16 num_cloud_filters;
+	/* snapshot of "num_active_queues" before setup_tc for qdisc add
+	 * is invoked. This information is useful during qdisc del flow,
+	 * to restore correct number of queues
+	 */
+	int orig_num_active_queues;
 #define IAVF_MAX_FDIR_FILTERS 128	/* max allowed Flow Director filters */
 	u16 fdir_active_fltr;
...
@@ -3410,6 +3410,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
 				   struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
 	u64 total_max_rate = 0;
+	u32 tx_rate_rem = 0;
 	int i, num_qps = 0;
 	u64 tx_rate = 0;
 	int ret = 0;
@@ -3424,12 +3425,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
 			return -EINVAL;
 		if (mqprio_qopt->min_rate[i]) {
 			dev_err(&adapter->pdev->dev,
-				"Invalid min tx rate (greater than 0) specified\n");
+				"Invalid min tx rate (greater than 0) specified for TC%d\n",
+				i);
 			return -EINVAL;
 		}
-		/*convert to Mbps */
+
+		/* convert to Mbps */
 		tx_rate = div_u64(mqprio_qopt->max_rate[i],
 				  IAVF_MBPS_DIVISOR);
+
+		if (mqprio_qopt->max_rate[i] &&
+		    tx_rate < IAVF_MBPS_QUANTA) {
+			dev_err(&adapter->pdev->dev,
+				"Invalid max tx rate for TC%d, minimum %dMbps\n",
+				i, IAVF_MBPS_QUANTA);
+			return -EINVAL;
+		}
+
+		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+		if (tx_rate_rem != 0) {
+			dev_err(&adapter->pdev->dev,
+				"Invalid max tx rate for TC%d, not divisible by %d\n",
+				i, IAVF_MBPS_QUANTA);
+			return -EINVAL;
+		}
+
 		total_max_rate += tx_rate;
 		num_qps += mqprio_qopt->qopt.count[i];
 	}
@@ -3496,6 +3517,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
 			netif_tx_disable(netdev);
 			iavf_del_all_cloud_filters(adapter);
 			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
+			total_qps = adapter->orig_num_active_queues;
 			goto exit;
 		} else {
 			return -EINVAL;
@@ -3539,7 +3561,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
 					adapter->ch_config.ch_info[i].offset = 0;
 				}
 			}
+
+			/* Take snapshot of original config such as "num_active_queues"
+			 * It is used later when delete ADQ flow is exercised, so that
+			 * once delete ADQ flow completes, VF shall go back to its
+			 * original queue configuration
+			 */
+			adapter->orig_num_active_queues = adapter->num_active_queues;
+
+			/* Store queue info based on TC so that VF gets configured
+			 * with correct number of queues when VF completes ADQ config
+			 * flow
+			 */
 			adapter->ch_config.total_qps = total_qps;
+
 			netif_tx_stop_all_queues(netdev);
 			netif_tx_disable(netdev);
 			adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
@@ -3556,6 +3592,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
 		}
 	}
 exit:
+	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+		return 0;
+
+	netif_set_real_num_rx_queues(netdev, total_qps);
+	netif_set_real_num_tx_queues(netdev, total_qps);
+
 	return ret;
 }
...
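A note on the iavf hunk above: the two added checks reject any per-TC max rate that is below IAVF_MBPS_QUANTA (50 Mbps) or not a whole multiple of it. A standalone sketch of the same arithmetic, with the kernel's div_u64()/div_u64_rem() helpers reduced to plain 64-bit division for illustration:

#include <stdint.h>
#include <stdio.h>

#define IAVF_MBPS_DIVISOR 125000 /* bytes/s -> Mbps */
#define IAVF_MBPS_QUANTA  50

/* Returns 0 if a per-TC max rate (bytes/s) is acceptable, -1 otherwise. */
static int validate_tc_max_rate(uint64_t rate_bytes_per_sec)
{
	uint64_t tx_rate = rate_bytes_per_sec / IAVF_MBPS_DIVISOR; /* Mbps */

	if (rate_bytes_per_sec && tx_rate < IAVF_MBPS_QUANTA)
		return -1; /* below the 50 Mbps minimum */
	if (tx_rate % IAVF_MBPS_QUANTA != 0)
		return -1; /* not a multiple of the 50 Mbps quanta */
	return 0;
}

int main(void)
{
	/* 12500000 bytes/s == 100 Mbps: valid; 13750000 == 110 Mbps: rejected */
	printf("%d %d\n", validate_tc_max_rate(12500000ULL),
	       validate_tc_max_rate(13750000ULL));
	return 0;
}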
@@ -433,7 +433,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 						IFF_PROMISC;
 					goto out_promisc;
 				}
-				if (vsi->current_netdev_flags &
+				if (vsi->netdev->features &
 				    NETIF_F_HW_VLAN_CTAG_FILTER)
 					vlan_ops->ena_rx_filtering(vsi);
 			}
...
@@ -21,7 +21,7 @@
 #define OTX2_HEAD_ROOM		OTX2_ALIGN
 #define OTX2_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN)
-#define OTX2_MIN_MTU		64
+#define OTX2_MIN_MTU		60
 #define OTX2_MAX_GSO_SEGS	255
 #define OTX2_MAX_FRAGS_IN_SQE	9
...
@@ -109,7 +109,7 @@ struct page_pool;
 #define MLX5E_REQUIRED_WQE_MTTS	(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS	\
-	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+	(ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
 #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
 	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
@@ -174,8 +174,8 @@ struct page_pool;
 	ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
 #define MLX5E_MAX_KLM_PER_WQE(mdev) \
-	MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
-				  << MLX5_MKEY_BSF_OCTO_SIZE)
+	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
+		mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))

 #define MLX5E_MSG_LEVEL		NETIF_MSG_LINK
@@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
 		     MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
 }

-static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
 {
 	/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
 	 * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
 	 * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
 	 * cache-aligned.
 	 */
-#if L1_CACHE_BYTES < 128
-	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
-#else
-	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+	u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+
+#if L1_CACHE_BYTES >= 128
+	wqebbs = ALIGN_DOWN(wqebbs, 2);
 #endif
+	return wqebbs;
 }

 struct mlx5e_tx_wqe {
@@ -456,7 +457,7 @@ struct mlx5e_txqsq {
 	struct netdev_queue	*txq;
 	u32			sqn;
 	u16			stop_room;
-	u16			max_sq_mpw_wqebbs;
+	u8			max_sq_mpw_wqebbs;
 	u8			min_inline_mode;
 	struct device		*pdev;
 	__be32			mkey_be;
@@ -571,7 +572,7 @@ struct mlx5e_xdpsq {
 	struct device		*pdev;
 	__be32			mkey_be;
 	u16			stop_room;
-	u16			max_sq_mpw_wqebbs;
+	u8			max_sq_mpw_wqebbs;
 	u8			min_inline_mode;
 	unsigned long		state;
 	unsigned int		hw_mtu;
...
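On the MLX5E_MAX_RQ_NUM_MTTS change above: (1 << 16) * 2 = 131072 MTTs maps to 65536 octowords, which overflows the u16 the comment refers to, while ALIGN_DOWN(U16_MAX, 4) * 2 = 131064 keeps it at 65532. A quick standalone check of that arithmetic (MLX5_MTT_OCTW is assumed here to be ALIGN(npages, 8) / 2, consistent with the comment but not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) / (a) * (a))
#define ALIGN_DOWN(x, a) ((x) / (a) * (a))
#define U16_MAX 0xffffU

/* Octoword count for an MTT list; assumed ALIGN(npages, 8) / 2. */
static uint32_t mtt_octw(uint32_t num_mtts)
{
	return ALIGN_UP(num_mtts, 8) / 2;
}

int main(void)
{
	uint32_t old_max = (1 << 16) * 2;              /* 131072 MTTs */
	uint32_t new_max = ALIGN_DOWN(U16_MAX, 4) * 2; /* 131064 MTTs */

	/* prints "old: 65536 new: 65532"; 65536 does not fit in a u16 */
	printf("old: %u new: %u\n", mtt_octw(old_max), mtt_octw(new_max));
	return 0;
}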
@@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

 	wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));

+	/* If XDP program is attached, XSK may be turned on at any time without
+	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
+	 * both regular RQ and XSK RQ.
+	 * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
+	 * doesn't affect its return value, as long as params->xdp_prog != NULL,
+	 * so we can just multiply by 2.
+	 */
+	if (params->xdp_prog)
+		wqebbs *= 2;
+
 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
 		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

 	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
 }
...
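The comment in the en_main.c hunk explains the sizing rule: the ICOSQ must hold UMR WQEs for the regular RQ, and doubling the budget when an XDP program is attached leaves room for an XSK RQ that may be enabled at runtime. A toy recomputation of the resulting log size (the constants are illustrative, not the driver's; order_base_2 is re-implemented portably):

#include <stdint.h>
#include <stdio.h>

/* Smallest n such that (1 << n) >= x, for x >= 1 (stand-in for order_base_2). */
static unsigned int order_base_2(uint32_t x)
{
	unsigned int n = 0;

	while ((1U << n) < x)
		n++;
	return n;
}

int main(void)
{
	uint32_t umr_wqebbs = 4;  /* illustrative UMR WQE size in WQEBBs */
	uint32_t log_rq_sz = 10;  /* illustrative RQ size: 1024 entries */
	uint32_t wqebbs = umr_wqebbs * (1U << log_rq_sz);
	int xdp_attached = 1;

	if (xdp_attached)
		wqebbs *= 2; /* room for both regular RQ and XSK RQ */

	printf("icosq log size: %u\n", order_base_2(wqebbs)); /* 13 */
	return 0;
}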
@@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
 	post_attr->inner_match_level = MLX5_MATCH_NONE;
 	post_attr->outer_match_level = MLX5_MATCH_NONE;
 	post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+	post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;

 	handle->ns_type = post_act->ns_type;
 	/* Splits were handled before post action */
...
@@ -7,6 +7,8 @@
 #include "en.h"
 #include <net/xdp_sock_drv.h>

+#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
+
 /* RX data path */

 struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
@@ -21,6 +23,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
 					    struct mlx5e_dma_info *dma_info)
 {
+retry:
 	dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
 	if (!dma_info->xsk)
 		return -ENOMEM;
@@ -32,6 +35,17 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
 	 */
 	dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);

+	/* MTT page mapping has alignment requirements. If they are not
+	 * satisfied, leak the descriptor so that it won't come again, and try
+	 * to allocate a new one.
+	 */
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+		if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
+			xsk_buff_discard(dma_info->xsk);
+			goto retry;
+		}
+	}
+
 	return 0;
 }
...
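The retry loop above pairs with the new xsk_buff_discard() helper (see the include/net/xdp_sock_drv.h hunk further down): a frame whose DMA address has any of the low 3 bits set violates the MTT alignment, so it is dropped without being recycled and the next allocation yields a different frame. A self-contained model of that pattern, with a fake allocator standing in for xsk_buff_alloc():

#include <stdint.h>
#include <stdio.h>

#define MTT_PTAG_MASK 0xfffffffffffffff8ULL /* low 3 bits must be clear */

/* Stand-in allocator: returns successive fake frame addresses. */
static uint64_t fake_xsk_alloc(void)
{
	static uint64_t next = 0x1004; /* first frame deliberately unaligned */
	uint64_t addr = next;

	next += 0x1000 - 4; /* nudge so a later frame lands aligned */
	return addr;
}

/* Mirrors the retry pattern: discard unaligned frames, keep allocating. */
static uint64_t alloc_aligned_frame(void)
{
	uint64_t addr;

retry:
	addr = fake_xsk_alloc();
	if (addr & ~MTT_PTAG_MASK) {
		/* the kernel would call xsk_buff_discard() here: leak, don't recycle */
		goto retry;
	}
	return addr;
}

int main(void)
{
	printf("got aligned frame at 0x%llx\n",
	       (unsigned long long)alloc_aligned_frame());
	return 0;
}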
@@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;

-	if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+	if (!mlx5e_ktls_type_check(mdev, crypto_info))
 		return -EOPNOTSUPP;

 	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
...
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 }

 static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
-			 struct mlx5_flow_act *flow_act,
-			 struct mlx5_fs_chains *chains,
-			 int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+		      struct mlx5_fs_chains *chains, int i)
 {
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
 	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }

+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+			 struct mlx5_eswitch *esw, int i)
+{
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
 static int
 esw_setup_chain_dest(struct mlx5_flow_destination *dest,
 		     struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 	} else if (attr->dest_ft) {
 		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
 		(*i)++;
-	} else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
-		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+	} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
+		(*i)++;
+	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+		esw_setup_accept_dest(dest, flow_act, chains, *i);
 		(*i)++;
 	} else if (attr->dest_chain) {
 		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
...
@@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
 	dev->timeouts->to[type] = val;
 }

-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
+int mlx5_tout_init(struct mlx5_core_dev *dev)
 {
 	int i;

-	for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
-		tout_set(dev, tout_def_sw_val[i], i);
-}
-
-int mlx5_tout_init(struct mlx5_core_dev *dev)
-{
 	dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
 	if (!dev->timeouts)
 		return -ENOMEM;
+
+	for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
+		tout_set(dev, tout_def_sw_val[i], i);
+
 	return 0;
 }
...
@@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
 void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
 int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
 u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);

 #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
...
@@ -544,7 +544,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 	/* Check log_max_qp from HCA caps to set in current profile */
 	if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
-		prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
 	} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
 		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
 			       prof->log_max_qp,
@@ -1050,8 +1050,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
 	if (mlx5_core_is_pf(dev))
 		pcie_print_link_status(dev->pdev);

-	mlx5_tout_set_def_val(dev);
-
 	/* wait for firmware to accept initialization segments configurations
 	 */
 	err = wait_fw_init(dev, timeout,
...
@@ -21,10 +21,11 @@ enum dr_dump_rec_type {
 	DR_DUMP_REC_TYPE_TABLE_TX = 3102,

 	DR_DUMP_REC_TYPE_MATCHER = 3200,
-	DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+	DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
 	DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
 	DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
 	DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+	DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,

 	DR_DUMP_REC_TYPE_RULE = 3300,
 	DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
 		break;
 	case DR_ACTION_TYP_FT:
 		if (action->dest_tbl->is_fw_tbl)
-			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+			seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
 				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-				   rule_id, action->dest_tbl->fw_tbl.id);
+				   rule_id, action->dest_tbl->fw_tbl.id,
+				   -1);
 		else
-			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+			seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
 				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-				   rule_id, action->dest_tbl->tbl->table_id);
+				   rule_id, action->dest_tbl->tbl->table_id,
+				   DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
 		break;
 	case DR_ACTION_TYP_CTR:
...
@@ -1437,7 +1437,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
 	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
 		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

-	if ((vlan_flags & features) &&
+	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
 		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
...
@@ -54,6 +54,7 @@ struct nsim_fib_data {
 	struct rhashtable nexthop_ht;
 	struct devlink *devlink;
 	struct work_struct fib_event_work;
+	struct work_struct fib_flush_work;
 	struct list_head fib_event_queue;
 	spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
 	struct mutex nh_lock; /* Protects NH HT */
@@ -61,6 +62,7 @@ struct nsim_fib_data {
 	bool fail_route_offload;
 	bool fail_res_nexthop_group_replace;
 	bool fail_nexthop_bucket_replace;
+	bool fail_route_delete;
 };

 struct nsim_fib_rt_key {
@@ -914,6 +916,10 @@ static int nsim_fib4_prepare_event(struct fib_notifier_info *info,
 		}
 		break;
 	case FIB_EVENT_ENTRY_DEL:
+		if (data->fail_route_delete) {
+			NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
+			return -EINVAL;
+		}
 		nsim_fib_account(&data->ipv4.fib, false);
 		break;
 	}
@@ -952,6 +958,11 @@ static int nsim_fib6_prepare_event(struct fib_notifier_info *info,
 		}
 		break;
 	case FIB_EVENT_ENTRY_DEL:
+		if (data->fail_route_delete) {
+			err = -EINVAL;
+			NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
+			goto err_fib6_event_fini;
+		}
 		nsim_fib_account(&data->ipv6.fib, false);
 		break;
 	}
@@ -978,7 +989,7 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
 	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
 	if (!fib_event)
-		return NOTIFY_BAD;
+		goto err_fib_event_alloc;

 	fib_event->data = data;
 	fib_event->event = event;
@@ -1006,6 +1017,9 @@ static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
 err_fib_prepare_event:
 	kfree(fib_event);
+err_fib_event_alloc:
+	if (event == FIB_EVENT_ENTRY_DEL)
+		schedule_work(&data->fib_flush_work);
 	return NOTIFY_BAD;
 }
@@ -1483,6 +1497,24 @@ static void nsim_fib_event_work(struct work_struct *work)
 	mutex_unlock(&data->fib_lock);
 }

+static void nsim_fib_flush_work(struct work_struct *work)
+{
+	struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
+						  fib_flush_work);
+	struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+	/* Process pending work. */
+	flush_work(&data->fib_event_work);
+
+	mutex_lock(&data->fib_lock);
+	list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+		rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+				       nsim_fib_rt_ht_params);
+		nsim_fib_rt_free(fib_rt, data);
+	}
+	mutex_unlock(&data->fib_lock);
+}
+
 static int
 nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
 {
@@ -1504,6 +1536,10 @@ nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
 	debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir,
 			    data, &nsim_nexthop_bucket_activity_fops);
+
+	data->fail_route_delete = false;
+	debugfs_create_bool("fail_route_delete", 0600, data->ddir,
+			    &data->fail_route_delete);
 	return 0;
 }
@@ -1541,6 +1577,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
 		goto err_rhashtable_nexthop_destroy;

 	INIT_WORK(&data->fib_event_work, nsim_fib_event_work);
+	INIT_WORK(&data->fib_flush_work, nsim_fib_flush_work);
 	INIT_LIST_HEAD(&data->fib_event_queue);
 	spin_lock_init(&data->fib_event_queue_lock);
@@ -1587,6 +1624,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
 err_nexthop_nb_unregister:
 	unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
 err_rhashtable_fib_destroy:
+	cancel_work_sync(&data->fib_flush_work);
 	flush_work(&data->fib_event_work);
 	rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
 				    data);
@@ -1616,6 +1654,7 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
 				  NSIM_RESOURCE_IPV4_FIB);
 	unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
 	unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
+	cancel_work_sync(&data->fib_flush_work);
 	flush_work(&data->fib_event_work);
 	rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
 				    data);
...
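The netdevsim error path above uses a drain-then-flush pattern: when a route-deletion event cannot be queued, fib_flush_work first flushes the ordered event work, then walks the route list with list_for_each_entry_safe() and frees every entry so no stale route keeps a reference alive. A minimal userspace model of that safe-iteration teardown (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct fake_rt {
	int prefix;
	struct fake_rt *next;
};

/* Model of nsim_fib_flush_work(): drain pending events first (elided),
 * then free every cached route so nothing keeps a reference alive.
 */
static void flush_all_routes(struct fake_rt **head)
{
	struct fake_rt *rt, *tmp;

	/* flush_work(&fib_event_work) would go here in the kernel. */
	for (rt = *head; rt; rt = tmp) {
		tmp = rt->next; /* save next before freeing (entry_safe idiom) */
		printf("dropping route %d\n", rt->prefix);
		free(rt);
	}
	*head = NULL;
}

int main(void)
{
	struct fake_rt *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct fake_rt *rt = malloc(sizeof(*rt));

		rt->prefix = i;
		rt->next = head;
		head = rt;
	}
	flush_all_routes(&head);
	return 0;
}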
@@ -637,8 +637,9 @@ config USB_NET_AQC111
 	  * Aquantia AQtion USB to 5GbE

 config USB_RTL8153_ECM
-	tristate "RTL8153 ECM support"
+	tristate
 	depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
+	default y
 	help
 	  This option supports ECM mode for RTL8153 ethernet adapter, when
 	  CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
...
@@ -1844,55 +1844,55 @@ static const struct driver_info at_umc2000sp_info = {
 static const struct usb_device_id products[] = {
 {
 	/* ASIX AX88179 10/100/1000 */
-	USB_DEVICE(0x0b95, 0x1790),
+	USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x1790, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&ax88179_info,
 }, {
 	/* ASIX AX88178A 10/100/1000 */
-	USB_DEVICE(0x0b95, 0x178a),
+	USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x178a, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&ax88178a_info,
 }, {
 	/* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
-	USB_DEVICE(0x04b4, 0x3610),
+	USB_DEVICE_AND_INTERFACE_INFO(0x04b4, 0x3610, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&cypress_GX3_info,
 }, {
 	/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
-	USB_DEVICE(0x2001, 0x4a00),
+	USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x4a00, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&dlink_dub1312_info,
 }, {
 	/* Sitecom USB 3.0 to Gigabit Adapter */
-	USB_DEVICE(0x0df6, 0x0072),
+	USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0072, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&sitecom_info,
 }, {
 	/* Samsung USB Ethernet Adapter */
-	USB_DEVICE(0x04e8, 0xa100),
+	USB_DEVICE_AND_INTERFACE_INFO(0x04e8, 0xa100, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&samsung_info,
 }, {
 	/* Lenovo OneLinkDock Gigabit LAN */
-	USB_DEVICE(0x17ef, 0x304b),
+	USB_DEVICE_AND_INTERFACE_INFO(0x17ef, 0x304b, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&lenovo_info,
 }, {
 	/* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
-	USB_DEVICE(0x050d, 0x0128),
+	USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x0128, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&belkin_info,
 }, {
 	/* Toshiba USB 3.0 GBit Ethernet Adapter */
-	USB_DEVICE(0x0930, 0x0a13),
+	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x0a13, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&toshiba_info,
 }, {
 	/* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */
-	USB_DEVICE(0x0711, 0x0179),
+	USB_DEVICE_AND_INTERFACE_INFO(0x0711, 0x0179, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&mct_info,
 }, {
 	/* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
-	USB_DEVICE(0x07c9, 0x000e),
+	USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000e, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&at_umc2000_info,
 }, {
 	/* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */
-	USB_DEVICE(0x07c9, 0x000f),
+	USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000f, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&at_umc200_info,
 }, {
 	/* Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */
-	USB_DEVICE(0x07c9, 0x0010),
+	USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x0010, 0xff, 0xff, 0),
 	.driver_info = (unsigned long)&at_umc2000sp_info,
 },
 	{ },
...
@@ -236,6 +236,7 @@ typedef struct ax25_cb {
 	ax25_address		source_addr, dest_addr;
 	ax25_digi		*digipeat;
 	ax25_dev		*ax25_dev;
+	netdevice_tracker	dev_tracker;
 	unsigned char		iamdigi;
 	unsigned char		state, modulus, pidincl;
 	unsigned short		vs, vr, va;
...
@@ -110,8 +110,6 @@ static inline bool inet6_match(struct net *net, const struct sock *sk,
 			       const __portpair ports,
 			       const int dif, const int sdif)
 {
-	int bound_dev_if;
-
 	if (!net_eq(sock_net(sk), net) ||
 	    sk->sk_family != AF_INET6 ||
 	    sk->sk_portpair != ports ||
@@ -119,8 +117,9 @@ static inline bool inet6_match(struct net *net, const struct sock *sk,
 	    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 		return false;

-	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
-	return bound_dev_if == dif || bound_dev_if == sdif;
+	/* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+	return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+				    sdif);
 }
 #endif /* IS_ENABLED(CONFIG_IPV6) */
...
@@ -175,17 +175,6 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
 	hashinfo->ehash_locks = NULL;
 }

-static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
-					int dif, int sdif)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
-				 bound_dev_if, dif, sdif);
-#else
-	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
-#endif
-}
-
 struct inet_bind_bucket *
 inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
 			struct inet_bind_hashbucket *head,
@@ -271,16 +260,14 @@ static inline bool inet_match(struct net *net, const struct sock *sk,
 			      const __addrpair cookie, const __portpair ports,
 			      int dif, int sdif)
 {
-	int bound_dev_if;
-
 	if (!net_eq(sock_net(sk), net) ||
 	    sk->sk_portpair != ports ||
 	    sk->sk_addrpair != cookie)
 		return false;

-	/* Paired with WRITE_ONCE() from sock_bindtoindex_locked() */
-	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
-	return bound_dev_if == dif || bound_dev_if == sdif;
+	/* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
+	return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
+				    sdif);
 }

 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
...
@@ -149,6 +149,17 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
 	return bound_dev_if == dif || bound_dev_if == sdif;
 }

+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+					int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
+				 bound_dev_if, dif, sdif);
+#else
+	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
 struct inet_cork {
 	unsigned int		flags;
 	__be32			addr;
...
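The inet_sk_bound_dev_eq() move above lets inet_match() and inet6_match() share one bound-device check. A userspace model of the underlying inet_bound_dev_eq() semantics (the unbound-socket branch is assumed from the helper's kernel definition; only the bound-socket return is visible in this hunk):

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of inet_bound_dev_eq(): an unbound socket matches
 * unless the packet arrived through an L3 slave (sdif != 0) and
 * l3mdev_accept is off; a bound socket must match the incoming
 * ifindex directly (dif) or via the slave index (sdif).
 */
static bool bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
			 int dif, int sdif)
{
	if (!bound_dev_if)
		return !sdif || l3mdev_accept;
	return bound_dev_if == dif || bound_dev_if == sdif;
}

int main(void)
{
	/* Socket bound to ifindex 4, packet on dif=2 with sdif=4 (VRF): match */
	printf("%d\n", bound_dev_eq(false, 4, 2, 4));
	/* Unbound socket, packet via L3 slave, l3mdev_accept off: no match */
	printf("%d\n", bound_dev_eq(false, 0, 2, 4));
	return 0;
}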
@@ -104,6 +104,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
 	xp_free(xskb);
 }

+static inline void xsk_buff_discard(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	xp_release(xskb);
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
 	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
@@ -252,6 +259,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
 {
 }

+static inline void xsk_buff_discard(struct xdp_buff *xdp)
+{
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
 }
...
@@ -1066,7 +1066,7 @@ static int ax25_release(struct socket *sock)
 			del_timer_sync(&ax25->t3timer);
 			del_timer_sync(&ax25->idletimer);
 		}
-		netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
+		netdev_put(ax25_dev->dev, &ax25->dev_tracker);
 		ax25_dev_put(ax25_dev);
 	}
@@ -1147,7 +1147,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (ax25_dev) {
 		ax25_fillin_cb(ax25, ax25_dev);
-		netdev_hold(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
+		netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
 	}

done:
...
@@ -736,11 +736,6 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

 	lock_sock(sk);

-	if (dccp_qpolicy_full(sk)) {
-		rc = -EAGAIN;
-		goto out_release;
-	}
-
 	timeo = sock_sndtimeo(sk, noblock);

 	/*
@@ -759,6 +754,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (skb == NULL)
 		goto out_release;

+	if (dccp_qpolicy_full(sk)) {
+		rc = -EAGAIN;
+		goto out_discard;
+	}
+
 	if (sk->sk_state == DCCP_CLOSED) {
 		rc = -ENOTCONN;
 		goto out_discard;
...
@@ -742,7 +742,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 					    NULL, 0, rs, &local_odp_mr->r_key, NULL,
 					    iov->addr, iov->bytes, ODP_VIRTUAL);
 		if (IS_ERR(local_odp_mr->r_trans_private)) {
-			ret = IS_ERR(local_odp_mr->r_trans_private);
+			ret = PTR_ERR(local_odp_mr->r_trans_private);
 			rdsdebug("get_mr ret %d %p\"", ret,
 				 local_odp_mr->r_trans_private);
 			kfree(local_odp_mr);
...
@@ -16,6 +16,7 @@ ALL_TESTS="
 	ipv4_replay
 	ipv4_flush
 	ipv4_error_path
+	ipv4_delete_fail
 	ipv6_add
 	ipv6_metric
 	ipv6_append_single
@@ -29,11 +30,13 @@ ALL_TESTS="
 	ipv6_replay_single
 	ipv6_replay_multipath
 	ipv6_error_path
+	ipv6_delete_fail
 "
 NETDEVSIM_PATH=/sys/bus/netdevsim/
 DEV_ADDR=1337
 DEV=netdevsim${DEV_ADDR}
 SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
+DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV/
 NUM_NETIFS=0
 source $lib_dir/lib.sh
 source $lib_dir/fib_offload_lib.sh
@@ -157,6 +160,27 @@ ipv4_error_path()
 	ipv4_error_path_replay
 }

+ipv4_delete_fail()
+{
+	RET=0
+
+	echo "y" > $DEBUGFS_DIR/fib/fail_route_delete
+
+	ip -n testns1 link add name dummy1 type dummy
+	ip -n testns1 link set dev dummy1 up
+
+	ip -n testns1 route add 192.0.2.0/24 dev dummy1
+	ip -n testns1 route del 192.0.2.0/24 dev dummy1 &> /dev/null
+
+	# We should not be able to delete the netdev if we are leaking a
+	# reference.
+	ip -n testns1 link del dev dummy1
+
+	log_test "IPv4 route delete failure"
+
+	echo "n" > $DEBUGFS_DIR/fib/fail_route_delete
+}
+
 ipv6_add()
 {
 	fib_ipv6_add_test "testns1"
@@ -304,6 +328,27 @@ ipv6_error_path()
 	ipv6_error_path_replay
 }

+ipv6_delete_fail()
+{
+	RET=0
+
+	echo "y" > $DEBUGFS_DIR/fib/fail_route_delete
+
+	ip -n testns1 link add name dummy1 type dummy
+	ip -n testns1 link set dev dummy1 up
+
+	ip -n testns1 route add 2001:db8:1::/64 dev dummy1
+	ip -n testns1 route del 2001:db8:1::/64 dev dummy1 &> /dev/null
+
+	# We should not be able to delete the netdev if we are leaking a
+	# reference.
+	ip -n testns1 link del dev dummy1
+
+	log_test "IPv6 route delete failure"
+
+	echo "n" > $DEBUGFS_DIR/fib/fail_route_delete
+}
+
 fib_notify_on_flag_change_set()
 {
 	local notify=$1; shift
...