Commit de8856d2 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 1) Name string overrun fix in gianfar driver from Joe Perches.

 2) VHOST bug fixes from Michael S. Tsirkin and Nadav Har'El

 3) Fix dependencies on xt_LOG netfilter module, from Pablo Neira Ayuso.

 4) Fix RCU locking in xt_CT, also from Pablo Neira Ayuso.

 5) Add a parameter to skb_add_rx_frag() so we can fix the truesize
    adjustments in the drivers that use it.  The individual drivers
    aren't fixed by this commit, but will be dealt with using follow-on
    commits.  From Eric Dumazet.  (A sketch of the updated helper
    follows this list.)

 6) Add some device IDs to qmi_wwan driver, from Andrew Bird.

 7) Fix a potential rcu_read_lock() imbalance in rt6_fill_node().  From
    Eric Dumazet.  (A condensed sketch of the corrected locking sequence
    follows this list.)
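
The skb_add_rx_frag() change, condensed from the include/linux/skbuff.h and
net/core/skbuff.c hunks below (a sketch, not the full patch): the helper now
takes the true memory cost of the fragment separately from its data length,
so drivers can stop conflating the two when accounting skb->truesize.

        /* New prototype:
         *   void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
         *                        int off, int size, unsigned int truesize);
         */
        void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
                             int off, int size, unsigned int truesize)
        {
                skb_fill_page_desc(skb, i, page, off, size);
                skb->len      += size;
                skb->data_len += size;
                skb->truesize += truesize;      /* was: skb->truesize += size; */
        }

        /* Callers converted in this pull keep their old behaviour by passing
         * the fragment length as truesize for now, e.g. in the iwlegacy RX path:
         *
         *   skb_add_rx_frag(skb, 0, rxb->page,
         *                   (void *)rx_hdr->payload - (void *)pkt, len, len);
         */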

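The rt6_fill_node() fix in item 7 follows a pattern worth noting: the old
NLA_PUT() macro hides a "goto nla_put_failure" on error, so calling it while
holding rcu_read_lock() could leave the read-side lock held on the failure
path.  A condensed sketch of the corrected sequence, taken from the
net/ipv6/route.c hunk below:

        rcu_read_lock();
        n = dst_get_neighbour_noref(&rt->dst);
        if (n) {
                /* nla_put() reports failure via its return value, so the
                 * RCU read lock can be dropped explicitly before bailing
                 * out, instead of jumping away from inside a macro. */
                if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
                        rcu_read_unlock();
                        goto nla_put_failure;
                }
        }
        rcu_read_unlock();
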
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: fix a potential rcu_read_lock() imbalance in rt6_fill_node()
  net: add a truesize parameter to skb_add_rx_frag()
  gianfar: Fix possible overrun and simplify interrupt name field creation
  USB: qmi_wwan: Add ZTE (Vodafone) K3570-Z and K3571-Z net interfaces
  USB: option: Ignore ZTE (Vodafone) K3570/71 net interfaces
  USB: qmi_wwan: Add ZTE (Vodafone) K3565-Z and K4505-Z net interfaces
  qlcnic: Bug fix for LRO
  netfilter: nf_conntrack: permanently attach timeout policy to conntrack
  netfilter: xt_CT: fix assignation of the generic protocol tracker
  netfilter: xt_CT: missing rcu_read_lock section in timeout assignment
  netfilter: cttimeout: fix dependency with l4protocol conntrack module
  netfilter: xt_LOG: use CONFIG_IP6_NF_IPTABLES instead of CONFIG_IPV6
  vhost: fix release path lockdep checks
  vhost: don't forget to schedule()
  tools/virtio: stub out strong barriers
  tools/virtio: add linux/hrtimer.h stub
  tools/virtio: add linux/module.h stub
parents 66f03c61 94f826b8
@@ -968,7 +968,6 @@ static int gfar_probe(struct platform_device *ofdev)
struct gfar_private *priv = NULL;
struct gfar __iomem *regs = NULL;
int err = 0, i, grp_idx = 0;
int len_devname;
u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
u32 isrg = 0;
u32 __iomem *baddr;
@@ -1169,40 +1168,16 @@ static int gfar_probe(struct platform_device *ofdev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
len_devname = strlen(dev->name);
for (i = 0; i < priv->num_grps; i++) {
strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
len_devname);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
"_g", sizeof("_g"));
priv->gfargrp[i].int_name_tx[
strlen(priv->gfargrp[i].int_name_tx)] = i+48;
strncpy(&priv->gfargrp[i].int_name_tx[strlen(
priv->gfargrp[i].int_name_tx)],
"_tx", sizeof("_tx") + 1);
strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
len_devname);
strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
"_g", sizeof("_g"));
priv->gfargrp[i].int_name_rx[
strlen(priv->gfargrp[i].int_name_rx)] = i+48;
strncpy(&priv->gfargrp[i].int_name_rx[strlen(
priv->gfargrp[i].int_name_rx)],
"_rx", sizeof("_rx") + 1);
strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
len_devname);
strncpy(&priv->gfargrp[i].int_name_er[len_devname],
"_g", sizeof("_g"));
priv->gfargrp[i].int_name_er[strlen(
priv->gfargrp[i].int_name_er)] = i+48;
strncpy(&priv->gfargrp[i].int_name_er[strlen(\
priv->gfargrp[i].int_name_er)],
"_er", sizeof("_er") + 1);
sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
dev->name, "_g", '0' + i, "_tx");
sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
dev->name, "_g", '0' + i, "_rx");
sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
dev->name, "_g", '0' + i, "_er");
} else
priv->gfargrp[i].int_name_tx[len_devname] = '\0';
strcpy(priv->gfargrp[i].int_name_tx, dev->name);
}
/* Initialize the filer table */

@@ -517,7 +517,7 @@ extern const char gfar_driver_version[];
#define RXFCB_PERR_MASK 0x000c
#define RXFCB_PERR_BADL3 0x0008
#define GFAR_INT_NAME_MAX IFNAMSIZ + 4
#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
struct txbd8
{

@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
#define _QLCNIC_LINUX_SUBVERSION 25
#define QLCNIC_LINUX_VERSIONID "5.0.26"
#define _QLCNIC_LINUX_SUBVERSION 27
#define QLCNIC_LINUX_VERSIONID "5.0.27"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))

@@ -1458,8 +1458,10 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (!err)
if (!err) {
__qlcnic_up(adapter, netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
}
netif_device_attach(netdev);

@@ -164,12 +164,14 @@ static void rx_complete(struct urb *req)
/* Can't use pskb_pull() on page in IRQ */
memcpy(skb_put(skb, 1), page_address(page), 1);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 1, req->actual_length);
page, 1, req->actual_length,
req->actual_length);
page = NULL;
}
} else {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, req->actual_length);
page, 0, req->actual_length,
req->actual_length);
page = NULL;
}
if (req->actual_length < PAGE_SIZE)

@@ -409,6 +409,42 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
{ /* ZTE (Vodafone) K3565-Z */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
.idProduct = 0x0063,
.bInterfaceClass = 0xff,
.bInterfaceSubClass = 0xff,
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
{ /* ZTE (Vodafone) K3570-Z */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
.idProduct = 0x1008,
.bInterfaceClass = 0xff,
.bInterfaceSubClass = 0xff,
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
{ /* ZTE (Vodafone) K3571-Z */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
.idProduct = 0x1010,
.bInterfaceClass = 0xff,
.bInterfaceSubClass = 0xff,
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
{ /* ZTE (Vodafone) K4505-Z */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
.idProduct = 0x0104,
.bInterfaceClass = 0xff,
.bInterfaceSubClass = 0xff,
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
{QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */

@@ -499,7 +499,8 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
le32_to_cpu(rx_end->status), stats);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
(void *)rx_hdr->payload - (void *)pkt, len,
len);
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

@@ -596,7 +596,8 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
return;
}
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
len);
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

@@ -796,7 +796,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
offset = (void *)hdr - rxb_addr(rxb);
p = rxb_steal_page(rxb);
skb_add_rx_frag(skb, 0, p, offset, len);
skb_add_rx_frag(skb, 0, p, offset, len, len);
iwl_update_stats(priv, false, fc, len);

@@ -345,7 +345,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb->len <= 1, req->actual);
skb->len <= 1, req->actual, req->actual);
page = NULL;
if (req->actual < req->length) { /* Last fragment */

@@ -903,8 +903,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },

@@ -588,7 +588,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
vhost_dev_cleanup(&n->dev);
vhost_dev_cleanup(&n->dev, false);
if (tx_sock)
fput(tx_sock->file);
if (rx_sock)

@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
if (work) {
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
} else
schedule();
@@ -403,7 +405,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
if (!memory)
return -ENOMEM;
vhost_dev_cleanup(dev);
vhost_dev_cleanup(dev, true);
memory->nregions = 0;
RCU_INIT_POINTER(dev->memory, memory);
@@ -434,8 +436,8 @@ int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
return j;
}
/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
int i;
@@ -472,6 +474,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
dev->log_file = NULL;
/* No one will access memory at this point */
kfree(rcu_dereference_protected(dev->memory,
locked ==
lockdep_is_held(&dev->mutex)));
RCU_INIT_POINTER(dev->memory, NULL);
WARN_ON(!list_empty(&dev->work_list));

@@ -163,7 +163,7 @@ struct vhost_dev {
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

@@ -1245,7 +1245,7 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
}
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
int off, int size);
int off, int size, unsigned int truesize);
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))

@@ -118,6 +118,10 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
extern struct nf_conntrack_l4proto *
__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
extern struct nf_conntrack_l4proto *
nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
/* Protocol registration. */
extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);

@@ -15,7 +15,7 @@ struct ctnl_timeout {
atomic_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
__u16 l3num;
__u8 l4num;
struct nf_conntrack_l4proto *l4proto;
char data[0];
};

@@ -321,12 +321,12 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
EXPORT_SYMBOL(__netdev_alloc_skb);
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size)
int size, unsigned int truesize)
{
skb_fill_page_desc(skb, i, page, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += size;
skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

@@ -2474,8 +2474,12 @@ static int rt6_fill_node(struct net *net,
rcu_read_lock();
n = dst_get_neighbour_noref(&rt->dst);
if (n)
NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
if (n) {
if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
rcu_read_unlock();
goto nla_put_failure;
}
}
rcu_read_unlock();
if (rt->dst.dev)

@@ -768,8 +768,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
unsigned int dataoff, u32 hash,
unsigned int *timeouts)
unsigned int dataoff, u32 hash)
{
struct nf_conn *ct;
struct nf_conn_help *help;
@@ -777,6 +776,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_ecache *ecache;
struct nf_conntrack_expect *exp;
u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
struct nf_conn_timeout *timeout_ext;
unsigned int *timeouts;
if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
pr_debug("Can't invert tuple.\n");
@@ -788,12 +789,21 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
if (timeout_ext)
timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
else
timeouts = l4proto->get_timeouts(net);
if (!l4proto->new(ct, skb, dataoff, timeouts)) {
nf_conntrack_free(ct);
pr_debug("init conntrack: can't track with proto module\n");
return NULL;
}
if (timeout_ext)
nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
@@ -854,8 +864,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
int *set_reply,
enum ip_conntrack_info *ctinfo,
unsigned int *timeouts)
enum ip_conntrack_info *ctinfo)
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
@@ -875,7 +884,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
skb, dataoff, hash, timeouts);
skb, dataoff, hash);
if (!h)
return NULL;
if (IS_ERR(h))
@@ -964,19 +973,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
goto out;
}
/* Decide what timeout policy we want to apply to this flow. */
if (tmpl) {
timeout_ext = nf_ct_timeout_find(tmpl);
if (timeout_ext)
timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
else
timeouts = l4proto->get_timeouts(net);
} else
timeouts = l4proto->get_timeouts(net);
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
l3proto, l4proto, &set_reply, &ctinfo,
timeouts);
l3proto, l4proto, &set_reply, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(net, invalid);
@@ -993,6 +991,13 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
NF_CT_ASSERT(skb->nfct);
/* Decide what timeout policy we want to apply to this flow. */
timeout_ext = nf_ct_timeout_find(ct);
if (timeout_ext)
timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
else
timeouts = l4proto->get_timeouts(net);
ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
if (ret <= 0) {
/* Invalid: inverse of the return code tells

@@ -127,6 +127,27 @@ void nf_ct_l3proto_module_put(unsigned short l3proto)
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
struct nf_conntrack_l4proto *
nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num)
{
struct nf_conntrack_l4proto *p;
rcu_read_lock();
p = __nf_ct_l4proto_find(l3num, l4num);
if (!try_module_get(p->me))
p = &nf_conntrack_l4proto_generic;
rcu_read_unlock();
return p;
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p)
{
module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
static int kill_l3proto(struct nf_conn *i, void *data)
{
return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto;

@@ -98,11 +98,13 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
break;
}
l4proto = __nf_ct_l4proto_find(l3num, l4num);
l4proto = nf_ct_l4proto_find_get(l3num, l4num);
/* This protocol is not supportted, skip. */
if (l4proto->l4proto != l4num)
return -EOPNOTSUPP;
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
goto err_proto_put;
}
if (matching) {
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
@@ -110,20 +112,25 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
* different kind, sorry.
*/
if (matching->l3num != l3num ||
matching->l4num != l4num)
return -EINVAL;
matching->l4proto->l4proto != l4num) {
ret = -EINVAL;
goto err_proto_put;
}
ret = ctnl_timeout_parse_policy(matching, l4proto,
cda[CTA_TIMEOUT_DATA]);
return ret;
}
return -EBUSY;
ret = -EBUSY;
goto err_proto_put;
}
timeout = kzalloc(sizeof(struct ctnl_timeout) +
l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
if (timeout == NULL)
return -ENOMEM;
if (timeout == NULL) {
ret = -ENOMEM;
goto err_proto_put;
}
ret = ctnl_timeout_parse_policy(timeout, l4proto,
cda[CTA_TIMEOUT_DATA]);
@@ -132,13 +139,15 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
timeout->l3num = l3num;
timeout->l4num = l4num;
timeout->l4proto = l4proto;
atomic_set(&timeout->refcnt, 1);
list_add_tail_rcu(&timeout->head, &cttimeout_list);
return 0;
err:
kfree(timeout);
err_proto_put:
nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -149,7 +158,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
unsigned int flags = pid ? NLM_F_MULTI : 0;
struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_l4proto *l4proto = timeout->l4proto;
event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
@@ -163,20 +172,10 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4num);
NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto);
NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
htonl(atomic_read(&timeout->refcnt)));
l4proto = __nf_ct_l4proto_find(timeout->l3num, timeout->l4num);
/* If the timeout object does not match the layer 4 protocol tracker,
* then skip dumping the data part since we don't know how to
* interpret it. This may happen for UPDlite, SCTP and DCCP since
* you can unload the module.
*/
if (timeout->l4num != l4proto->l4proto)
goto out;
if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
struct nlattr *nest_parms;
int ret;
@@ -192,7 +191,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
nla_nest_end(skb, nest_parms);
}
out:
nlmsg_end(skb, nlh);
return skb->len;
@@ -293,6 +292,7 @@ static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
if (atomic_dec_and_test(&timeout->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&timeout->head);
nf_ct_l4proto_put(timeout->l4proto);
kfree_rcu(timeout, rcu_head);
} else {
/* still in use, restore reference counter. */
@@ -417,6 +417,7 @@ static void __exit cttimeout_exit(void)
/* We are sure that our objects have no clients at this point,
* it's safe to release them all without checking refcnt.
*/
nf_ct_l4proto_put(cur->l4proto);
kfree_rcu(cur, rcu_head);
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT

@@ -14,8 +14,10 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CT.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -217,50 +219,59 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
struct ctnl_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
rcu_read_lock();
timeout_find_get =
rcu_dereference(nf_ct_timeout_find_get_hook);
if (timeout_find_get) {
const struct ipt_entry *e = par->entryinfo;
struct nf_conntrack_l4proto *l4proto;
if (e->ip.invflags & IPT_INV_PROTO) {
ret = -EINVAL;
pr_info("You cannot use inversion on "
"L4 protocol\n");
goto err3;
goto err4;
}
timeout = timeout_find_get(info->timeout);
if (timeout == NULL) {
ret = -ENOENT;
pr_info("No such timeout policy \"%s\"\n",
info->timeout);
goto err3;
goto err4;
}
if (timeout->l3num != par->family) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be "
"used by L3 protocol number %d\n",
info->timeout, timeout->l3num);
goto err3;
goto err4;
}
if (timeout->l4num != e->ip.proto) {
/* Make sure the timeout policy matches any existing
* protocol tracker, otherwise default to generic.
*/
l4proto = __nf_ct_l4proto_find(par->family,
e->ip.proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be "
"used by L4 protocol number %d\n",
info->timeout, timeout->l4num);
goto err3;
info->timeout,
timeout->l4proto->l4proto);
goto err4;
}
timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
GFP_KERNEL);
if (timeout_ext == NULL) {
ret = -ENOMEM;
goto err3;
goto err4;
}
} else {
ret = -ENOENT;
pr_info("Timeout policy base is empty\n");
goto err3;
goto err4;
}
rcu_read_unlock();
}
#endif
@@ -270,6 +281,8 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
info->ct = ct;
return 0;
err4:
rcu_read_unlock();
err3:
nf_conntrack_free(ct);
err2:
@@ -311,6 +324,7 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
nf_ct_l3proto_module_put(par->family);
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
rcu_read_lock();
timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
if (timeout_put) {
@@ -318,6 +332,7 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
if (timeout_ext)
timeout_put(timeout_ext->timeout);
}
rcu_read_unlock();
#endif
}
nf_ct_put(info->ct);

@@ -480,7 +480,7 @@ ipt_log_packet(u_int8_t pf,
sb_close(m);
}
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
/* One level of recursion won't kill us */
static void dump_ipv6_packet(struct sbuff *m,
const struct nf_loginfo *info,
@@ -824,7 +824,7 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (par->family == NFPROTO_IPV4)
ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
@@ -864,7 +864,7 @@ static struct xt_target log_tg_regs[] __read_mostly = {
.checkentry = log_tg_check,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "LOG",
.family = NFPROTO_IPV6,
@@ -882,7 +882,7 @@ static struct nf_logger ipt_log_logger __read_mostly = {
.me = THIS_MODULE,
};
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static struct nf_logger ip6t_log_logger __read_mostly = {
.name = "ip6t_LOG",
.logfn = &ip6t_log_packet,
@@ -899,7 +899,7 @@ static int __init log_tg_init(void)
return ret;
nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
#endif
return 0;
@@ -908,7 +908,7 @@ static void __exit log_tg_exit(void)
static void __exit log_tg_exit(void)
{
nf_log_unregister(&ipt_log_logger);
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
nf_log_unregister(&ip6t_log_logger);
#endif
xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));

@@ -181,6 +181,9 @@ struct virtqueue {
#define smp_mb() mb()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
/* Weak barriers should be used. If not - it's a bug */
# define rmb() abort()
# define wmb() abort()
#else
#error Please fill in barrier macros
#endif