Commit ec0c9671 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Last bit of straggler fixes...

  1) Fix btf library licensing to LGPL, from Martin KaFai Lau.

  2) Fix error handling in bpf sockmap code, from Daniel Borkmann.

  3) XDP cpumap teardown handling wrt. execution contexts, from Jesper
     Dangaard Brouer.

  4) Fix loss of runtime PM on failed vlan add/del, from Ivan
     Khoronzhuk.

  5) xen-netfront caches skb_shinfo(skb) across a __pskb_pull_tail()
     call, which potentially changes the skb's data buffer, and thus
     skb_shinfo(). Fix from Juergen Gross"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  xen/netfront: don't cache skb_shinfo()
  net: ethernet: ti: cpsw: fix runtime_pm while add/kill vlan
  net: ethernet: ti: cpsw: clear all entries when delete vid
  xdp: fix bug in devmap teardown code path
  samples/bpf: xdp_redirect_cpu adjustment to reproduce teardown race easier
  xdp: fix bug in cpumap teardown code path
  bpf, sockmap: fix cork timeout for select due to epipe
  bpf, sockmap: fix leak in bpf_tcp_sendmsg wait for mem path
  bpf, sockmap: fix bpf_tcp_sendmsg sock error handling
  bpf: btf: Change tools/lib/bpf/btf to LGPL
parents 4f7a7bea d472b3a6
@@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
                 int i;
 
                 for (i = 0; i < cpsw->data.slaves; i++) {
-                        if (vid == cpsw->slaves[i].port_vlan)
-                                return -EINVAL;
+                        if (vid == cpsw->slaves[i].port_vlan) {
+                                ret = -EINVAL;
+                                goto err;
+                        }
                 }
         }
 
         dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
         ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
         pm_runtime_put(cpsw->dev);
         return ret;
 }
@@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
                 for (i = 0; i < cpsw->data.slaves; i++) {
                         if (vid == cpsw->slaves[i].port_vlan)
-                                return -EINVAL;
+                                goto err;
                 }
         }
 
         dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
         ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-        if (ret != 0)
-                return ret;
-
-        ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-                                 HOST_PORT_NUM, ALE_VLAN, vid);
-        if (ret != 0)
-                return ret;
-
-        ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-                                 0, ALE_VLAN, vid);
+        ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+                                  HOST_PORT_NUM, ALE_VLAN, vid);
+        ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+                                  0, ALE_VLAN, vid);
+err:
         pm_runtime_put(cpsw->dev);
         return ret;
 }
......
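The two cpsw hunks above share one idea: take the runtime-PM reference once at the top of the ndo handler, funnel every exit (including the early -EINVAL case) through a single err: label so pm_runtime_put() is always reached, and OR the individual ALE delete results together so one missing entry no longer aborts the rest of the cleanup. A minimal, self-contained sketch of that shape follows; resource_get()/resource_put() and the del_* helpers are invented stand-ins for illustration, not the driver's API.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for pm_runtime_get_sync()/pm_runtime_put(). */
static int resource_get(void)
{
        puts("get");
        return 0;
}

static void resource_put(void)
{
        puts("put");
}

/* Hypothetical cleanup steps; each may fail independently. */
static int del_vlan(int vid)  { (void)vid; return 0; }
static int del_ucast(int vid) { (void)vid; return -ENOENT; }
static int del_mcast(int vid) { (void)vid; return 0; }

static int kill_vid(int vid, int reserved_vid)
{
        int ret;

        ret = resource_get();
        if (ret < 0)
                return ret;

        if (vid == reserved_vid) {
                ret = -EINVAL;
                goto err;               /* early exit still releases the reference */
        }

        ret = del_vlan(vid);
        ret |= del_ucast(vid);          /* keep going even if one entry is missing */
        ret |= del_mcast(vid);
err:
        resource_put();                 /* single release point, always reached */
        return ret;
}

int main(void)
{
        printf("kill_vid(5)   = %d\n", kill_vid(5, 100));
        printf("kill_vid(100) = %d\n", kill_vid(100, 100));
        return 0;
}

Built as an ordinary userspace program, kill_vid(100, 100) returns -EINVAL but still prints "put", which is exactly the get/put balance the fix restores.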
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
         idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
         if (idx < 0)
-                return -EINVAL;
+                return -ENOENT;
 
         cpsw_ale_read(ale, idx, ale_entry);
......
@@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                                   struct sk_buff *skb,
                                   struct sk_buff_head *list)
 {
-        struct skb_shared_info *shinfo = skb_shinfo(skb);
         RING_IDX cons = queue->rx.rsp_cons;
         struct sk_buff *nskb;
@@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                         RING_GET_RESPONSE(&queue->rx, ++cons);
                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-                if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+                if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
                         BUG_ON(pull_to <= skb_headlen(skb));
                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                 }
-                BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+                BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-                skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                                skb_frag_page(nfrag),
                                 rx->offset, rx->status, PAGE_SIZE);
 
                 skb_shinfo(nskb)->nr_frags = 0;
......
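The netfront change works because skb_shinfo(skb) is not a stable pointer: it is derived from the skb's data area, and __pskb_pull_tail() may reallocate that area, leaving any pointer cached before the pull dangling. The fix simply re-evaluates skb_shinfo(skb) after each point where a reallocation can happen. Below is a userspace analogy of the same stale-derived-pointer hazard, with an invented buf/shared_info pair standing in for the skb; this is not the netfront code.

#include <stdio.h>
#include <stdlib.h>

/* Toy skb: a heap buffer whose "shared info" footer sits at a fixed
 * offset from the end of the current allocation, the way skb_shinfo()
 * is derived from the skb's data area. */
struct shared_info { int nr_frags; };

struct buf {
        char   *data;
        size_t  len;
};

static struct shared_info *buf_shinfo(struct buf *b)
{
        /* Derived pointer: only valid for the current b->data allocation. */
        return (struct shared_info *)(b->data + b->len - sizeof(struct shared_info));
}

/* Toy __pskb_pull_tail(): may move the whole allocation while
 * preserving the footer contents at the new end. */
static void buf_grow(struct buf *b, size_t extra)
{
        struct shared_info save = *buf_shinfo(b);

        b->data = realloc(b->data, b->len + extra);
        if (!b->data)
                abort();
        b->len += extra;
        *buf_shinfo(b) = save;          /* footer now lives at the new end */
}

int main(void)
{
        struct buf b = { .data = calloc(1, 256), .len = 256 };

        struct shared_info *cached = buf_shinfo(&b);    /* like the old cached shinfo */
        cached->nr_frags = 3;

        buf_grow(&b, 4096);     /* b.data may now point somewhere else entirely */

        /* 'cached' may be dangling here; the safe pattern is to re-derive
         * the pointer after the operation that can reallocate, which is
         * what the patch does with skb_shinfo(skb). */
        printf("nr_frags via fresh pointer: %d\n", buf_shinfo(&b)->nr_frags);

        free(b.data);
        return 0;
}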
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                             struct xdp_bulk_queue *bq);
+                             struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
                 struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
                 /* No concurrent bq_enqueue can run at this point */
-                bq_flush_to_queue(rcpu, bq);
+                bq_flush_to_queue(rcpu, bq, false);
         }
         free_percpu(rcpu->bulkq);
         /* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                             struct xdp_bulk_queue *bq)
+                             struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
         unsigned int processed = 0, drops = 0;
         const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
                 err = __ptr_ring_produce(q, xdpf);
                 if (err) {
                         drops++;
-                        xdp_return_frame_rx_napi(xdpf);
+                        if (likely(in_napi_ctx))
+                                xdp_return_frame_rx_napi(xdpf);
+                        else
+                                xdp_return_frame(xdpf);
                 }
                 processed++;
         }
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
         struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
         if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-                bq_flush_to_queue(rcpu, bq);
+                bq_flush_to_queue(rcpu, bq, true);
 
         /* Notice, xdp_buff/page MUST be queued here, long enough for
          * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
                 /* Flush all frames in bulkq to real queue */
                 bq = this_cpu_ptr(rcpu->bulkq);
-                bq_flush_to_queue(rcpu, bq);
+                bq_flush_to_queue(rcpu, bq, true);
 
                 /* If already running, costs spin_lock_irqsave + smb_mb */
                 wake_up_process(rcpu->kthread);
......
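Both the cpumap hunks above and the devmap hunks below thread a single bool through the shared flush helpers: xdp_return_frame_rx_napi() is the fast variant that is only valid while running under NAPI protection, and the map-teardown paths (__cpu_map_entry_free(), dev_map_flush_old()) do not run in that context, so they must fall back to the plain xdp_return_frame(). A small sketch of the general "caller passes its execution context down to a shared helper" pattern; the frame type and the two return helpers here are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical frame type and return helpers standing in for
 * xdp_return_frame_rx_napi() / xdp_return_frame(). */
struct frame { int id; };

static void return_frame_fast(struct frame *f)   /* only safe in "NAPI" context */
{
        printf("fast-path free of frame %d\n", f->id);
}

static void return_frame_safe(struct frame *f)   /* safe from any context */
{
        printf("generic free of frame %d\n", f->id);
}

/* The flush helper is shared by the hot path and the teardown path,
 * so the caller tells it which context it runs in. */
static void flush_queue(struct frame **q, int n, bool in_napi_ctx)
{
        for (int i = 0; i < n; i++) {
                if (in_napi_ctx)
                        return_frame_fast(q[i]);
                else
                        return_frame_safe(q[i]);
        }
}

int main(void)
{
        struct frame a = { 1 }, b = { 2 };
        struct frame *q[] = { &a, &b };

        flush_queue(q, 2, true);    /* e.g. called while processing RX */
        flush_queue(q, 2, false);   /* e.g. called from map teardown */
        return 0;
}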
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-                       struct xdp_bulk_queue *bq, u32 flags)
+                       struct xdp_bulk_queue *bq, u32 flags,
+                       bool in_napi_ctx)
 {
         struct net_device *dev = obj->dev;
         int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
                 struct xdp_frame *xdpf = bq->q[i];
 
                 /* RX path under NAPI protection, can return frames faster */
-                xdp_return_frame_rx_napi(xdpf);
+                if (likely(in_napi_ctx))
+                        xdp_return_frame_rx_napi(xdpf);
+                else
+                        xdp_return_frame(xdpf);
                 drops++;
         }
         goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
                 __clear_bit(bit, bitmap);
 
                 bq = this_cpu_ptr(dev->bulkq);
-                bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+                bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
         }
 }
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
         struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
         if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-                bq_xmit_all(obj, bq, 0);
+                bq_xmit_all(obj, bq, 0, true);
 
         /* Ingress dev_rx will be the same for all xdp_frame's in
          * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
                         __clear_bit(dev->bit, bitmap);
 
                         bq = per_cpu_ptr(dev->bulkq, cpu);
-                        bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+                        bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
                 }
         }
 }
......
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
         while (msg_data_left(msg)) {
-                struct sk_msg_buff *m;
+                struct sk_msg_buff *m = NULL;
                 bool enospc = false;
                 int copy;
 
                 if (sk->sk_err) {
-                        err = sk->sk_err;
+                        err = -sk->sk_err;
                         goto out_err;
                 }
@@ -1116,8 +1116,11 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                 err = sk_stream_wait_memory(sk, &timeo);
-                if (err)
+                if (err) {
+                        if (m && m != psock->cork)
+                                free_start_sg(sk, m);
                         goto out_err;
+                }
         }
 out_err:
         if (err < 0)
......
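The sockmap hunks fix two independent problems. First, sk->sk_err holds a positive errno, but a sendmsg implementation must report failure as a negative value; returning the positive number makes the caller treat it as a byte count and see a bogus short write instead of, say, EPIPE, hence err = -sk->sk_err. Second, when sk_stream_wait_memory() fails, the sk_msg_buff built in that loop iteration but not yet handed off (and not the cork buffer) has to be freed, hence the m = NULL initialization and the m != psock->cork check. A toy model of the sign convention only; the struct and functions below are invented for illustration.

#include <errno.h>
#include <stdio.h>

/* Kernel sockets store a *positive* errno in sk->sk_err, but a sendmsg
 * implementation must report failure as a *negative* errno. */
struct toy_sock { int sk_err; };

static int toy_sendmsg_buggy(struct toy_sock *sk)
{
        if (sk->sk_err)
                return sk->sk_err;    /* positive: caller thinks N bytes were sent */
        return 0;
}

static int toy_sendmsg_fixed(struct toy_sock *sk)
{
        if (sk->sk_err)
                return -sk->sk_err;   /* negative: caller sees a real error */
        return 0;
}

int main(void)
{
        struct toy_sock sk = { .sk_err = EPIPE };

        printf("buggy: %d (looks like a %d-byte send)\n",
               toy_sendmsg_buggy(&sk), toy_sendmsg_buggy(&sk));
        printf("fixed: %d (%s)\n", toy_sendmsg_fixed(&sk),
               toy_sendmsg_fixed(&sk) == -EPIPE ? "-EPIPE" : "?");
        return 0;
}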
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
......
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
          * procedure.
          */
         create_cpu_entry(1, 1024, 0, false);
-        create_cpu_entry(1, 128, 0, false);
+        create_cpu_entry(1, 8, 0, false);
         create_cpu_entry(1, 16000, 0, false);
 }
......
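The sample keeps MAX_CPUS defined twice, once in the BPF program and once in the loader, with matching warning comments; this change bumps both to 64 and adds a tiny 8-frame queue to stress_cpumap() so the cpumap teardown race is easier to reproduce. In similar split kern/user samples, one common way to avoid the manual sync is a small shared header, sketched here purely as a hypothetical; the file name is made up and this is not part of the patch.

/* xdp_redirect_cpu_common.h -- hypothetical shared header */
#ifndef XDP_REDIRECT_CPU_COMMON_H
#define XDP_REDIRECT_CPU_COMMON_H

/* Single definition included by both the BPF object and the loader,
 * so the map size and the user-space bookkeeping cannot drift apart. */
#define MAX_CPUS 64

#endif /* XDP_REDIRECT_CPU_COMMON_H */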
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
......
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 #ifndef __BPF_BTF_H
......
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
                 while (s->bytes_recvd < total_bytes) {
                         if (txmsg_cork) {
                                 timeout.tv_sec = 0;
-                                timeout.tv_usec = 1000;
+                                timeout.tv_usec = 300000;
                         } else {
                                 timeout.tv_sec = 1;
                                 timeout.tv_usec = 0;
......
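In the selftest's receive loop the corked case polls with select() on a very short timeout; 1 ms proved too tight and could make the test fail spuriously, so the cork timeout is raised to 300 ms (the shortlog ties this to the EPIPE handling fixed above). A minimal standalone example of sizing a select() timeout the same way; the file descriptor and setup here are assumptions, not the selftest's code.

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>

/* Wait up to 300 ms for 'fd' to become readable, the way the
 * corked-receive loop sizes its timeout after this change. */
static int wait_readable_300ms(int fd)
{
        fd_set rfds;
        struct timeval timeout;

        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);

        timeout.tv_sec = 0;
        timeout.tv_usec = 300000;   /* 300 ms, was 1 ms before the fix */

        return select(fd + 1, &rfds, NULL, NULL, &timeout);
}

int main(void)
{
        /* stdin stands in for the test socket here */
        int ret = wait_readable_300ms(0);

        printf("select() returned %d\n", ret);
        return 0;
}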