Commit 057a650b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Don't race in IPSEC dumps, from Yuejie Shi.

 2) Verify lengths properly in IPSEC requests, from Herbert Xu.

 3) Fix out of bounds access in ipv6 segment routing code, from David
    Lebrun.

 4) Don't write into the header of cloned SKBs in smsc95xx driver, from
    James Hughes.

 5) Several other drivers have this bug too, fix them. From Eric
    Dumazet.

 6) Fix access to uninitialized data in TC action cookie code, from
    Wolfgang Bumiller.

 7) Fix double free in IPV6 segment routing, again from David Lebrun.

 8) Don't let userspace set the RTF_PCPU flag, oops. From David Ahern.

 9) Fix use after free in qrtr code, from Dan Carpenter.

10) Don't double-destroy devices in ip6mr code, from Nikolay
    Aleksandrov.

11) Don't pass out-of-range TX queue indices into drivers, from Tushar
    Dave.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
  netpoll: Check for skb->queue_mapping
  ip6mr: fix notification device destruction
  bpf, doc: update bpf maintainers entry
  net: qrtr: potential use after free in qrtr_sendmsg()
  bpf: Fix values type used in test_maps
  net: ipv6: RTF_PCPU should not be settable from userspace
  gso: Validate assumption of frag_list segmentation
  kaweth: use skb_cow_head() to deal with cloned skbs
  ch9200: use skb_cow_head() to deal with cloned skbs
  lan78xx: use skb_cow_head() to deal with cloned skbs
  sr9700: use skb_cow_head() to deal with cloned skbs
  cx82310_eth: use skb_cow_head() to deal with cloned skbs
  smsc75xx: use skb_cow_head() to deal with cloned skbs
  ipv6: sr: fix double free of skb after handling invalid SRH
  MAINTAINERS: Add "B:" field for networking.
  net sched actions: allocate act cookie early
  qed: Fix issue in populating the PFC config parameters.
  qed: Fix possible system hang in the dcbnl-getdcbx() path.
  qed: Fix sending an invalid PFC error mask to MFW.
  qed: Fix possible error in populating max_tc field.
  ...
parents 92b4fc75 c70b17b7
......@@ -2585,12 +2585,26 @@ F: include/uapi/linux/if_bonding.h
BPF (Safe dynamic programs and tools)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/net/bpf_jit*
F: Documentation/networking/filter.txt
F: include/linux/bpf*
F: include/linux/filter.h
F: include/uapi/linux/bpf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
F: tools/testing/selftests/bpf/
F: kernel/trace/bpf_trace.c
F: lib/test_bpf.c
F: net/bpf/
F: net/core/filter.c
F: net/sched/act_bpf.c
F: net/sched/cls_bpf.c
F: samples/bpf/
F: tools/net/bpf*
F: tools/testing/selftests/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
......@@ -8761,6 +8775,7 @@ W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
B: mailto:netdev@vger.kernel.org
S: Maintained
F: net/
F: include/net/
......@@ -12464,7 +12479,6 @@ F: drivers/clk/ti/
F: include/linux/clk/ti.h
TI ETHERNET SWITCH DRIVER (CPSW)
M: Mugunthan V N <mugunthanvnm@ti.com>
R: Grygorii Strashko <grygorii.strashko@ti.com>
L: linux-omap@vger.kernel.org
L: netdev@vger.kernel.org
......
......@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
p_params->ets_cbs,
p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
if (p_params->ets_enabled && !p_params->max_ets_tc) {
p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
"ETS params: max_ets_tc is forced to %d\n",
p_params->max_ets_tc);
}
/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
* encoded in a type u32 array of size 2.
*/
......@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
u8 pfc_map = 0;
int i;
*pfc &= ~DCBX_PFC_ERROR_MASK;
if (p_params->pfc.willing)
*pfc |= DCBX_PFC_WILLING_MASK;
else
......@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
if (!dcbx_info)
return NULL;
......@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EINVAL;
......
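
The kzalloc(GFP_KERNEL) to kmalloc(GFP_ATOMIC) change above is the "possible system hang" fix: qed_dcbnl_get_dcbx() can be reached from a context that must not sleep, and a GFP_KERNEL allocation may sleep waiting for reclaim. A minimal sketch of the rule, with hypothetical names (stats_lock, fill_stats, info):

	/* sketch: GFP_KERNEL may sleep, which is illegal while atomic */
	spin_lock_bh(&stats_lock);                   /* atomic context begins */
	info = kmalloc(sizeof(*info), GFP_ATOMIC);   /* GFP_KERNEL here could hang */
	if (info)
		fill_stats(info);
	spin_unlock_bh(&stats_lock);
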
......@@ -1127,41 +1127,90 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = sh_get_mdio,
};
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ringsize, i;
struct sh_eth_txdesc *txdesc;
int free_num = 0;
int entry;
bool sent;
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
for (i = 0; i < mdp->num_rx_ring; i++)
dev_kfree_skb(mdp->rx_skbuff[i]);
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
entry = mdp->dirty_tx % mdp->num_tx_ring;
txdesc = &mdp->tx_ring[entry];
sent = !(txdesc->status & cpu_to_le32(TD_TACT));
if (sent_only && !sent)
break;
/* TACT bit must be checked before all the following reads */
dma_rmb();
netif_info(mdp, tx_done, ndev,
"tx entry %d status 0x%08x\n",
entry, le32_to_cpu(txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
le32_to_cpu(txdesc->len) >> 16,
DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
free_num++;
}
kfree(mdp->rx_skbuff);
mdp->rx_skbuff = NULL;
txdesc->status = cpu_to_le32(TD_TFP);
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_le32(TD_TDLE);
/* Free Tx skb ringbuffer */
if (mdp->tx_skbuff) {
for (i = 0; i < mdp->num_tx_ring; i++)
dev_kfree_skb(mdp->tx_skbuff[i]);
if (sent) {
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
}
kfree(mdp->tx_skbuff);
mdp->tx_skbuff = NULL;
}
return free_num;
}
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ringsize, i;
if (mdp->rx_ring) {
for (i = 0; i < mdp->num_rx_ring; i++) {
if (mdp->rx_skbuff[i]) {
struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
dma_unmap_single(&ndev->dev,
le32_to_cpu(rxdesc->addr),
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
}
}
ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
dma_free_coherent(NULL, ringsize, mdp->rx_ring,
mdp->rx_desc_dma);
mdp->rx_ring = NULL;
}
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
for (i = 0; i < mdp->num_rx_ring; i++)
dev_kfree_skb(mdp->rx_skbuff[i]);
}
kfree(mdp->rx_skbuff);
mdp->rx_skbuff = NULL;
if (mdp->tx_ring) {
sh_eth_tx_free(ndev, false);
ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
dma_free_coherent(NULL, ringsize, mdp->tx_ring,
mdp->tx_desc_dma);
mdp->tx_ring = NULL;
}
/* Free Tx skb ringbuffer */
kfree(mdp->tx_skbuff);
mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
......@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
update_mac_address(ndev);
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
int free_num = 0;
int entry;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
entry = mdp->dirty_tx % mdp->num_tx_ring;
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_le32(TD_TACT))
break;
/* TACT bit must be checked before all the following reads */
dma_rmb();
netif_info(mdp, tx_done, ndev,
"tx entry %d status 0x%08x\n",
entry, le32_to_cpu(txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
le32_to_cpu(txdesc->len) >> 16,
DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
free_num++;
}
txdesc->status = cpu_to_le32(TD_TFP);
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_le32(TD_TDLE);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
}
return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
......@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
intr_status, mdp->cur_tx, mdp->dirty_tx,
(u32)ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
sh_eth_tx_free(ndev, true);
/* SH7712 BUG */
if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
......@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
/* Clear Tx interrupts */
sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
sh_eth_txfree(ndev);
sh_eth_tx_free(ndev, true);
netif_wake_queue(ndev);
}
......@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
if (!sh_eth_tx_free(ndev, true)) {
netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
......
......@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
} else {
netif_rx_ni(skb);
}
return true;
......
......@@ -254,13 +254,8 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
tx_overhead = 0x40;
len = skb->len;
if (skb_headroom(skb) < tx_overhead) {
struct sk_buff *skb2;
skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
if (skb_cow_head(skb, tx_overhead)) {
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
......
......@@ -293,11 +293,8 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
{
int len = skb->len;
if (skb_headroom(skb) < 2) {
struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
if (skb_cow_head(skb, 2)) {
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
skb_push(skb, 2);
......
......@@ -803,19 +803,13 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
}
/* We now decide whether we can put our special header into the sk_buff */
if (skb_cloned(skb) || skb_headroom(skb) < 2) {
/* no such luck - we make our own */
struct sk_buff *copied_skb;
copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
dev_kfree_skb_irq(skb);
skb = copied_skb;
if (!copied_skb) {
if (skb_cow_head(skb, 2)) {
kaweth->stats.tx_errors++;
netif_start_queue(net);
spin_unlock_irq(&kaweth->device_lock);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
private_header = (__le16 *)__skb_push(skb, 2);
*private_header = cpu_to_le16(skb->len-2);
......
......@@ -2607,13 +2607,8 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
{
u32 tx_cmd_a, tx_cmd_b;
if (skb_headroom(skb) < TX_OVERHEAD) {
struct sk_buff *skb2;
skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
if (skb_cow_head(skb, TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
......
......@@ -2203,12 +2203,8 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
{
u32 tx_cmd_a, tx_cmd_b;
if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
struct sk_buff *skb2 =
skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
......
......@@ -2001,12 +2001,12 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
/* We do not advertise SG, so skbs should be already linearized */
BUG_ON(skb_shinfo(skb)->nr_frags);
if (skb_headroom(skb) < overhead) {
struct sk_buff *skb2 = skb_copy_expand(skb,
overhead, 0, flags);
/* Make writable and expand header space by overhead if required */
if (skb_cow_head(skb, overhead)) {
/* Must deallocate here as returning NULL to indicate error
* means the skb won't be deallocated in the caller.
*/
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
......
......@@ -456,13 +456,8 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
len = skb->len;
if (skb_headroom(skb) < SR_TX_OVERHEAD) {
struct sk_buff *skb2;
skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
......
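
All seven usbnet tx-path fixes above (kaweth, ch9200, lan78xx, sr9700, cx82310_eth, smsc75xx, smsc95xx) replace the same buggy shape: the old code copied the skb only when headroom was short, so a cloned skb that happened to have enough headroom kept sharing its header with the clone and then had driver bytes written into it. skb_cow_head() covers both conditions: it reallocates the header when it is shared or when headroom is insufficient, and is a no-op otherwise. A hedged sketch of the resulting pattern (TX_OVERHEAD stands in for each driver's own constant):

	/* sketch of the common fixed tx_fixup shape */
	if (skb_cow_head(skb, TX_OVERHEAD)) {
		/* must free here: returning NULL tells the caller not to */
		dev_kfree_skb_any(skb);
		return NULL;
	}
	/* header is now private and has room; safe to prepend */
	skb_push(skb, TX_OVERHEAD);
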
......@@ -35,7 +35,7 @@
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
#define RTF_PCPU 0x40000000
#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */
#define RTF_LOCAL 0x80000000
......
......@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
while ((skb = skb_dequeue(&npinfo->txq))) {
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
unsigned int q_index;
if (!netif_device_present(dev) || !netif_running(dev)) {
kfree_skb(skb);
continue;
}
txq = skb_get_tx_queue(dev, skb);
local_irq_save(flags);
/* check if skb->queue_mapping is still valid */
q_index = skb_get_queue_mapping(skb);
if (unlikely(q_index >= dev->real_num_tx_queues)) {
q_index = q_index % dev->real_num_tx_queues;
skb_set_queue_mapping(skb, q_index);
}
txq = netdev_get_tx_queue(dev, q_index);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (netif_xmit_frozen_or_stopped(txq) ||
netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
......
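
The netpoll check above exists because an skb can sit on npinfo->txq across a device reconfiguration that shrinks real_num_tx_queues; its stale queue_mapping would then index past the driver's TX queue array. Reducing it modulo the current queue count keeps the index valid. A standalone sketch of the clamp:

	#include <stdio.h>

	/* clamp a possibly-stale queue index into [0, real_num_tx_queues) */
	static unsigned int clamp_queue(unsigned int q_index,
					unsigned int real_num_tx_queues)
	{
		if (q_index >= real_num_tx_queues)
			q_index %= real_num_tx_queues;
		return q_index;
	}

	int main(void)
	{
		printf("%u\n", clamp_queue(7, 4));	/* prints 3 */
		return 0;
	}
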
......@@ -3082,22 +3082,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
if (sg && csum && (mss != GSO_BY_FRAGS)) {
if (!(features & NETIF_F_GSO_PARTIAL)) {
struct sk_buff *iter;
unsigned int frag_len;
if (!list_skb ||
!net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
goto normal;
/* Split the buffer at the frag_list pointer.
* This is based on the assumption that all
* buffers in the chain excluding the last
* containing the same amount of data.
/* If we get here then all the required
* GSO features except frag_list are supported.
* Try to split the SKB to multiple GSO SKBs
* with no frag_list.
* Currently we can do that only when the buffers don't
* have a linear part and all the buffers except
* the last are of the same length.
*/
frag_len = list_skb->len;
skb_walk_frags(head_skb, iter) {
if (frag_len != iter->len && iter->next)
goto normal;
if (skb_headlen(iter))
goto normal;
len -= iter->len;
}
if (len != frag_len)
goto normal;
}
/* GSO partial only requires that we trim off any excess that
......
......@@ -388,7 +388,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((&hdr->segments_left) -
skb_network_header(skb)));
kfree_skb(skb);
return -1;
}
......
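
The seg6 fix above deletes a kfree_skb() rather than adding code: icmpv6_param_prob() already consumes the skb it is handed, so freeing it again in the caller was the double free. In sketch form (off stands in for the offset expression):

	/* icmpv6_param_prob() frees the skb internally */
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, off);	/* consumes skb */
	return -1;					/* no kfree_skb(skb) here */
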
......@@ -774,7 +774,8 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
* Delete a VIF entry
*/
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
struct list_head *head)
{
struct mif_device *v;
struct net_device *dev;
......@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
dev->ifindex, &in6_dev->cnf);
}
if (v->flags & MIFF_REGISTER)
if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
......@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
struct mr6_table *mrt;
struct mif_device *v;
int ct;
LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
......@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
v = &mrt->vif6_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (v->dev == dev)
mif6_delete(mrt, ct, &list);
mif6_delete(mrt, ct, 1, NULL);
}
}
unregister_netdevice_many(&list);
return NOTIFY_DONE;
}
......@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
for (i = 0; i < mrt->maxvif; i++) {
if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
continue;
mif6_delete(mrt, i, &list);
mif6_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
......@@ -1707,7 +1706,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
return -EFAULT;
rtnl_lock();
ret = mif6_delete(mrt, mifi, NULL);
ret = mif6_delete(mrt, mifi, 0, NULL);
rtnl_unlock();
return ret;
......
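
The new notify argument tells mif6_delete() whether it was called from the NETDEV_UNREGISTER notifier. In that path the core is already unregistering the device, so queueing the register-VIF device for unregistration a second time destroyed it twice. A sketch of the idea, with hypothetical names (vif_dev, is_register_vif):

	/* sketch: skip the extra unregister when the core already owns teardown */
	static void vif_delete(struct vif_dev *v, int notify, struct list_head *head)
	{
		if (v->is_register_vif && !notify)
			unregister_netdevice_queue(v->dev, head);
		dev_put(v->dev);
	}
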
......@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
int addr_type;
int err = -EINVAL;
/* RTF_PCPU is an internal flag; can not be set by userspace */
if (cfg->fc_flags & RTF_PCPU)
goto out;
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
goto out;
#ifndef CONFIG_IPV6_SUBTREES
......
......@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
struct sr6_tlv *tlv;
unsigned int tlv_len;
if (trailing < sizeof(*tlv))
return false;
tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
tlv_len = sizeof(*tlv) + tlv->len;
......
......@@ -63,8 +63,13 @@ struct pfkey_sock {
} u;
struct sk_buff *skb;
} dump;
struct mutex dump_lock;
};
static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
xfrm_address_t *saddr, xfrm_address_t *daddr,
u16 *family);
static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
{
return (struct pfkey_sock *)sk;
......@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
struct pfkey_sock *pfk;
int err;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
......@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
if (sk == NULL)
goto out;
pfk = pfkey_sk(sk);
mutex_init(&pfk->dump_lock);
sock->ops = &pfkey_ops;
sock_init_data(sock, sk);
......@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
struct sadb_msg *hdr;
int rc;
mutex_lock(&pfk->dump_lock);
if (!pfk->dump.dump) {
rc = 0;
goto out;
}
rc = pfk->dump.dump(pfk);
if (rc == -ENOBUFS)
return 0;
if (rc == -ENOBUFS) {
rc = 0;
goto out;
}
if (pfk->dump.skb) {
if (!pfkey_can_dump(&pfk->sk))
return 0;
if (!pfkey_can_dump(&pfk->sk)) {
rc = 0;
goto out;
}
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
......@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
}
pfkey_terminate_dump(pfk);
out:
mutex_unlock(&pfk->dump_lock);
return rc;
}
......@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
struct xfrm_address_filter *filter = NULL;
struct pfkey_sock *pfk = pfkey_sk(sk);
if (pfk->dump.dump != NULL)
mutex_lock(&pfk->dump_lock);
if (pfk->dump.dump != NULL) {
mutex_unlock(&pfk->dump_lock);
return -EBUSY;
}
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
if (proto == 0) {
mutex_unlock(&pfk->dump_lock);
return -EINVAL;
}
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
if (filter == NULL) {
mutex_unlock(&pfk->dump_lock);
return -ENOMEM;
}
memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
sizeof(xfrm_address_t));
......@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
......@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
/* addresses present only in tunnel mode */
if (t->mode == XFRM_MODE_TUNNEL) {
u8 *sa = (u8 *) (rq + 1);
int family, socklen;
family = pfkey_sockaddr_extract((struct sockaddr *)sa,
&t->saddr);
if (!family)
return -EINVAL;
int err;
socklen = pfkey_sockaddr_len(family);
if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
&t->id.daddr) != family)
return -EINVAL;
t->encap_family = family;
err = parse_sockaddr_pair(
(struct sockaddr *)(rq + 1),
rq->sadb_x_ipsecrequest_len - sizeof(*rq),
&t->saddr, &t->id.daddr, &t->encap_family);
if (err)
return err;
} else
t->encap_family = xp->family;
......@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
return -EINVAL;
while (len >= sizeof(struct sadb_x_ipsecrequest)) {
while (len >= sizeof(*rq)) {
if (len < rq->sadb_x_ipsecrequest_len ||
rq->sadb_x_ipsecrequest_len < sizeof(*rq))
return -EINVAL;
if ((err = parse_ipsecrequest(xp, rq)) < 0)
return err;
len -= rq->sadb_x_ipsecrequest_len;
......@@ -2408,7 +2437,6 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
return err;
}
#ifdef CONFIG_NET_KEY_MIGRATE
static int pfkey_sockaddr_pair_size(sa_family_t family)
{
return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
......@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
{
int af, socklen;
if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
return -EINVAL;
af = pfkey_sockaddr_extract(sa, saddr);
......@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
return 0;
}
#ifdef CONFIG_NET_KEY_MIGRATE
static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
struct xfrm_migrate *m)
{
......@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
struct sadb_x_ipsecrequest *rq2;
int mode;
if (len <= sizeof(struct sadb_x_ipsecrequest) ||
len < rq1->sadb_x_ipsecrequest_len)
if (len < sizeof(*rq1) ||
len < rq1->sadb_x_ipsecrequest_len ||
rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
return -EINVAL;
/* old endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
rq1->sadb_x_ipsecrequest_len,
rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
&m->old_saddr, &m->old_daddr,
&m->old_family);
if (err)
......@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
len -= rq1->sadb_x_ipsecrequest_len;
if (len <= sizeof(struct sadb_x_ipsecrequest) ||
len < rq2->sadb_x_ipsecrequest_len)
if (len <= sizeof(*rq2) ||
len < rq2->sadb_x_ipsecrequest_len ||
rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
return -EINVAL;
/* new endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
rq2->sadb_x_ipsecrequest_len,
rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
&m->new_saddr, &m->new_daddr,
&m->new_family);
if (err)
......@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
{
struct pfkey_sock *pfk = pfkey_sk(sk);
if (pfk->dump.dump != NULL)
mutex_lock(&pfk->dump_lock);
if (pfk->dump.dump != NULL) {
mutex_unlock(&pfk->dump_lock);
return -EBUSY;
}
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_portid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sp;
pfk->dump.done = pfkey_dump_sp_done;
xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
......
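
Two independent hardenings in the pf_key hunk above: a dump_lock mutex serializing the per-socket dump state (pfkey_dump()/pfkey_spddump() vs. pfkey_do_dump()), and length validation on SADB ipsecrequest records, which were previously trusted to report a sane sadb_x_ipsecrequest_len. A record claiming a length shorter than its own header, or longer than the bytes remaining, lets the parser walk out of bounds. The validated walk reduces to this standalone userspace sketch over a hypothetical record type:

	#include <stddef.h>
	#include <stdint.h>

	struct rec { uint16_t len; uint16_t type; };	/* hypothetical TLV record */

	/* walk variable-length records, rejecting impossible lengths */
	static int walk_records(const uint8_t *buf, size_t len)
	{
		while (len >= sizeof(struct rec)) {
			const struct rec *rq = (const struct rec *)buf;

			if (rq->len < sizeof(*rq) || rq->len > len)
				return -1;	/* would overrun header or buffer */
			/* ... process one record ... */
			buf += rq->len;
			len -= rq->len;
		}
		return 0;
	}
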
......@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
return len;
}
static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
int rtap_vendor_space)
{
struct {
struct ieee80211_hdr_3addr hdr;
u8 category;
u8 action_code;
} __packed action;
if (!sdata)
return;
BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
if (skb->len < rtap_vendor_space + sizeof(action) +
VHT_MUMIMO_GROUPS_DATA_LEN)
return;
if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
return;
skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
if (!ieee80211_is_action(action.hdr.frame_control))
return;
if (action.category != WLAN_CATEGORY_VHT)
return;
if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
return;
if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
return;
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return;
skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
skb_queue_tail(&sdata->skb_queue, skb);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
/*
* ieee80211_add_rx_radiotap_header - add radiotap header
*
......@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
unsigned int rtap_vendor_space = 0;
struct ieee80211_mgmt *mgmt;
struct ieee80211_sub_if_data *monitor_sdata =
rcu_dereference(local->monitor_sdata);
......@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
return remove_monitor_info(local, origskb, rtap_vendor_space);
}
ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
/* room for the radiotap header based on driver features */
rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
needed_headroom = rt_hdrlen - rtap_vendor_space;
......@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
ieee80211_rx_stats(sdata->dev, skb->len);
}
mgmt = (void *)skb->data;
if (monitor_sdata &&
skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_VHT &&
mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
if (mu_skb) {
mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
ieee80211_queue_work(&local->hw, &monitor_sdata->work);
}
}
if (prev_dev) {
skb->dev = prev_dev;
netif_receive_skb(skb);
......@@ -3610,6 +3639,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
/*
* 802.11-2016 Table 9-26 says that for data frames, A1 must be
* the BSSID - we've checked that already but may have accepted
* the wildcard (ff:ff:ff:ff:ff:ff).
*
* It also says:
* The BSSID of the Data frame is determined as follows:
* a) If the STA is contained within an AP or is associated
* with an AP, the BSSID is the address currently in use
* by the STA contained in the AP.
*
* So we should not accept data frames with an address that's
* multicast.
*
* Accepting it also opens a security problem because stations
* could encrypt it with the GTK and inject traffic that way.
*/
if (ieee80211_is_data(hdr->frame_control) && multicast)
return false;
return true;
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
......
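
The mac80211 refactor above moves the MU-MIMO monitor handling into a helper that runs before the frame is cloned per monitor interface, and reads the action header with skb_copy_bits() after an explicit length check instead of dereferencing skb->data, which is only safe for linear skbs. The idiom, sketched with a hypothetical header type:

	struct my_hdr h;	/* hypothetical header layout */

	if (skb->len < offset + sizeof(h))
		return;					/* bounds check first */
	skb_copy_bits(skb, offset, &h, sizeof(h));	/* copies across frags */
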
......@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
}
if (plen != len) {
skb_pad(skb, plen - len);
rc = skb_pad(skb, plen - len);
if (rc)
goto out_node;
skb_put(skb, plen - len);
}
......
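
skb_pad() can fail, and on failure it frees the skb itself before returning the error; the old qrtr code ignored the return value and then called skb_put() on what could be freed memory. The fix checks the result, and the error path deliberately does not free the skb again:

	rc = skb_pad(skb, plen - len);	/* frees skb on failure */
	if (rc)
		goto out_node;		/* no kfree_skb(skb) here */
	skb_put(skb, plen - len);
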
......@@ -529,20 +529,20 @@ int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
return err;
}
static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
if (!a->act_cookie)
return -ENOMEM;
struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return NULL;
a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
if (!a->act_cookie->data) {
kfree(a->act_cookie);
return -ENOMEM;
c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
if (!c->data) {
kfree(c);
return NULL;
}
a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
c->len = nla_len(tb[TCA_ACT_COOKIE]);
return 0;
return c;
}
struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
......@@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
{
struct tc_action *a;
struct tc_action_ops *a_o;
struct tc_cookie *cookie = NULL;
char act_name[IFNAMSIZ];
struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
......@@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
goto err_out;
if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
goto err_out;
if (tb[TCA_ACT_COOKIE]) {
int cklen = nla_len(tb[TCA_ACT_COOKIE]);
if (cklen > TC_COOKIE_MAX_SIZE)
goto err_out;
cookie = nla_memdup_cookie(tb);
if (!cookie) {
err = -ENOMEM;
goto err_out;
}
}
} else {
err = -EINVAL;
if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
......@@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
if (err < 0)
goto err_mod;
if (tb[TCA_ACT_COOKIE]) {
int cklen = nla_len(tb[TCA_ACT_COOKIE]);
if (cklen > TC_COOKIE_MAX_SIZE) {
err = -EINVAL;
tcf_hash_release(a, bind);
goto err_mod;
}
if (nla_memdup_cookie(a, tb) < 0) {
err = -ENOMEM;
tcf_hash_release(a, bind);
goto err_mod;
if (name == NULL && tb[TCA_ACT_COOKIE]) {
if (a->act_cookie) {
kfree(a->act_cookie->data);
kfree(a->act_cookie);
}
a->act_cookie = cookie;
}
/* module count goes up only when brand new policy is created
......@@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
err_mod:
module_put(a_o->owner);
err_out:
if (cookie) {
kfree(cookie->data);
kfree(cookie);
}
return ERR_PTR(err);
}
......
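
The act cookie fix above is the allocate-early pattern: do the fallible allocation before the action is created or looked up, attach the result only after initialization succeeds, and free it on every error path, instead of bolting the cookie onto an already-visible action where a later failure leaves it half-updated. A hedged sketch with hypothetical helpers (alloc_cookie, create_action, attach_cookie, free_cookie):

	new = alloc_cookie(attrs);		/* fallible work first */
	if (!new)
		return ERR_PTR(-ENOMEM);
	a = create_action(attrs);		/* may fail */
	if (IS_ERR(a)) {
		free_cookie(new);		/* error path owns the cookie */
		return a;
	}
	attach_cookie(a, new);			/* commit only after success */
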
......@@ -282,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
int key, next_key, fd, i;
long values[nr_cpus];
long long values[nr_cpus];
fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
sizeof(values[0]), 2, 0);
......@@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void)
* allocator more than anything else
*/
unsigned int nr_keys = 2000;
long values[nr_cpus];
long long values[nr_cpus];
int key, fd, i;
fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
......
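
The test_maps change above matters only on 32-bit userspace: per-CPU BPF map values are laid out as one 8-byte slot per possible CPU regardless of the host ABI, so a buffer of long (4 bytes on 32-bit) is half the required size there, while long long is 8 bytes everywhere. A tiny standalone demonstration of the size difference:

	#include <stdio.h>

	int main(void)
	{
		/* on 32-bit hosts this prints "long=4 long long=8" */
		printf("long=%zu long long=%zu\n",
		       sizeof(long), sizeof(long long));
		return 0;
	}
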
......@@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
{
int fd, val;
fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
if (fd < 0) {
perror("socket packet");
exit(1);
......@@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
return fd;
}
static void sock_fanout_set_cbpf(int fd)
{
struct sock_filter bpf_filter[] = {
BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80), /* ldb [80] */
BPF_STMT(BPF_RET+BPF_A, 0), /* ret A */
};
struct sock_fprog bpf_prog;
bpf_prog.filter = bpf_filter;
bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
sizeof(bpf_prog))) {
perror("fanout data cbpf");
exit(1);
}
}
static void sock_fanout_set_ebpf(int fd)
{
const int len_off = __builtin_offsetof(struct __sk_buff, len);
......@@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
exit(1);
}
if (type == PACKET_FANOUT_CBPF)
sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
sock_fanout_set_cbpf(fds[0]);
else if (type == PACKET_FANOUT_EBPF)
sock_fanout_set_ebpf(fds[0]);
......
......@@ -38,7 +38,7 @@
# define __maybe_unused __attribute__ ((__unused__))
#endif
static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
static __maybe_unused void pair_udp_setfilter(int fd)
{
/* the filter below checks for all of the following conditions that
* are based on the contents of create_payload()
......@@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
};
struct sock_fprog bpf_prog;
if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
bpf_filter[5].code = 0x16; /* RET A */
bpf_prog.filter = bpf_filter;
bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
if (setsockopt(fd, lvl, optnum, &bpf_prog,
if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
sizeof(bpf_prog))) {
perror("setsockopt SO_ATTACH_FILTER");
exit(1);
}
}
static __maybe_unused void pair_udp_setfilter(int fd)
{
sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
}
static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
{
struct sockaddr_in saddr, daddr;
......