Commit 6811d58f by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	include/linux/if_link.h

Parents: c4949f07 c02db8c6
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
@@ -1036,7 +1036,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	__u16 flags = 0;
+	__u16 flags;
+
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();
 	if (get_user(flags, &vq->avail->flags)) {
 		vq_err(vq, "Failed to get flags");
 		return;
......
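The smp_mb() added above pairs with a barrier the guest executes when it re-enables interrupts. A minimal guest-side sketch of that pairing follows; the helper name and surrounding structure are hypothetical, not code from this commit — only the flags/used-index protocol and VRING_AVAIL_F_NO_INTERRUPT come from virtio.

/* Guest-side sketch of the barrier pairing (hypothetical helper).
 * Host:  update used index; smp_mb(); read avail->flags.
 * Guest: write avail->flags; mb();    read used index.
 * Each side stores then barriers then loads, so at least one side
 * observes the other's update and a notification is never lost. */
static bool vring_enable_interrupts(struct vring *vr, __u16 last_seen_used)
{
	vr->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;  /* enable */
	mb();			/* pairs with the host's smp_mb() above */
	/* Re-check: did the host add buffers while interrupts were off? */
	return vr->used->idx != last_seen_used;
}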
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
@@ -111,10 +111,7 @@ enum {
 	IFLA_NET_NS_PID,
 	IFLA_IFALIAS,
 	IFLA_NUM_VF,		/* Number of VFs if device is SR-IOV PF */
-	IFLA_VF_MAC,		/* Hardware queue specific attributes */
-	IFLA_VF_VLAN,
-	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
-	IFLA_VFINFO,
+	IFLA_VFINFO_LIST,
 	IFLA_STATS64,
 	__IFLA_MAX
 };
@@ -236,6 +233,24 @@ enum macvlan_mode {
 
 /* SR-IOV virtual function managment section */
+enum {
+	IFLA_VF_INFO_UNSPEC,
+	IFLA_VF_INFO,
+	__IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+	IFLA_VF_UNSPEC,
+	IFLA_VF_MAC,		/* Hardware queue specific attributes */
+	IFLA_VF_VLAN,
+	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
+	__IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
 struct ifla_vf_mac {
 	__u32 vf;
 	__u8 mac[32]; /* MAX_ADDR_LEN */
......
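For orientation, the reorganized if_link.h above replaces the flat per-VF attributes with a two-level nesting. The dump layout that rtnl_fill_ifinfo emits further down looks like this (a sketch assembled from the enums and structs in this diff):

	IFLA_VFINFO_LIST                 (NLA_NESTED)
	  IFLA_VF_INFO                   (NLA_NESTED, one per VF)
	    IFLA_VF_MAC                  (struct ifla_vf_mac)
	    IFLA_VF_VLAN                 (struct ifla_vf_vlan)
	    IFLA_VF_TX_RATE              (struct ifla_vf_tx_rate)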
diff --git a/include/net/tcp.h b/include/net/tcp.h
@@ -1206,30 +1206,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk,
 
 extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
 extern void tcp_free_md5sig_pool(void);
 
-extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
-extern void __tcp_put_md5sig_pool(void);
+extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+extern void tcp_put_md5sig_pool(void);
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
 				 unsigned header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 			    struct tcp_md5sig_key *key);
 
-static inline
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
-{
-	int cpu = get_cpu();
-	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
-	if (!ret)
-		put_cpu();
-	return ret;
-}
-
-static inline void tcp_put_md5sig_pool(void)
-{
-	__tcp_put_md5sig_pool();
-	put_cpu();
-}
 
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
......
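A sketch of how a caller uses the renamed pool API after this change; the wrapper function is hypothetical, but the calls are the ones declared above (and tcp_get_md5sig_pool() now returns with BH disabled on success, so nothing between get and put may sleep):

/* Hypothetical caller sketch: hash a key with the per-cpu pool. */
static int example_hash_key(struct tcp_md5sig_key *key, u8 *out_hash)
{
	struct tcp_md5sig_pool *hp;

	hp = tcp_get_md5sig_pool();	/* NULL if no pool allocated */
	if (!hp)
		return -ENOMEM;

	if (crypto_hash_init(&hp->md5_desc) ||
	    tcp_md5_hash_key(hp, key) ||
	    crypto_hash_final(&hp->md5_desc, out_hash)) {
		tcp_put_md5sig_pool();	/* re-enables BH */
		return -EINVAL;
	}

	tcp_put_md5sig_pool();		/* re-enables BH */
	return 0;
}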
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
@@ -644,12 +644,19 @@ static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
 	memcpy(v, &a, sizeof(a));
 }
 
+/* All VF info */
 static inline int rtnl_vfinfo_size(const struct net_device *dev)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent))
-		return dev_num_vf(dev->dev.parent) *
-			sizeof(struct ifla_vf_info);
-	else
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+
+		int num_vfs = dev_num_vf(dev->dev.parent);
+		size_t size = nlmsg_total_size(sizeof(struct nlattr));
+		size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
+		size += num_vfs * (sizeof(struct ifla_vf_mac) +
+				   sizeof(struct ifla_vf_vlan) +
+				   sizeof(struct ifla_vf_tx_rate));
+		return size;
+	} else
 		return 0;
 }
@@ -672,7 +679,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
 	       + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
 }
@@ -749,14 +756,37 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 
 	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
 		int i;
-		struct ifla_vf_info ivi;
 
-		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
-		for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+		struct nlattr *vfinfo, *vf;
+		int num_vfs = dev_num_vf(dev->dev.parent);
+
+		NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs);
+		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+		if (!vfinfo)
+			goto nla_put_failure;
+		for (i = 0; i < num_vfs; i++) {
+			struct ifla_vf_info ivi;
+			struct ifla_vf_mac vf_mac;
+			struct ifla_vf_vlan vf_vlan;
+			struct ifla_vf_tx_rate vf_tx_rate;
 			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
 				break;
-			NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+			vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+			memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+			vf_vlan.vlan = ivi.vlan;
+			vf_vlan.qos = ivi.qos;
+			vf_tx_rate.rate = ivi.tx_rate;
+			vf = nla_nest_start(skb, IFLA_VF_INFO);
+			if (!vf) {
+				nla_nest_cancel(skb, vfinfo);
+				goto nla_put_failure;
+			}
+			NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
+			NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
+			NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate);
+			nla_nest_end(skb, vf);
 		}
+		nla_nest_end(skb, vfinfo);
 	}
 	if (dev->rtnl_link_ops) {
 		if (rtnl_link_fill(skb, dev) < 0)
@@ -818,12 +848,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
 	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
-	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
-				    .len = sizeof(struct ifla_vf_mac) },
-	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
-				    .len = sizeof(struct ifla_vf_vlan) },
-	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
-				    .len = sizeof(struct ifla_vf_tx_rate) },
+	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
 };
 EXPORT_SYMBOL(ifla_policy);
@@ -832,6 +857,19 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
 };
 
+static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_mac) },
+	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_vlan) },
+	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_tx_rate) },
+};
+
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 {
 	struct net *net;
@@ -861,6 +899,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 	return 0;
 }
 
+static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+{
+	int rem, err = -EINVAL;
+	struct nlattr *vf;
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	nla_for_each_nested(vf, attr, rem) {
+		switch (nla_type(vf)) {
+		case IFLA_VF_MAC: {
+			struct ifla_vf_mac *ivm;
+			ivm = nla_data(vf);
+			err = -EOPNOTSUPP;
+			if (ops->ndo_set_vf_mac)
+				err = ops->ndo_set_vf_mac(dev, ivm->vf,
+							  ivm->mac);
+			break;
+		}
+		case IFLA_VF_VLAN: {
+			struct ifla_vf_vlan *ivv;
+			ivv = nla_data(vf);
+			err = -EOPNOTSUPP;
+			if (ops->ndo_set_vf_vlan)
+				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+							   ivv->vlan,
+							   ivv->qos);
+			break;
+		}
+		case IFLA_VF_TX_RATE: {
+			struct ifla_vf_tx_rate *ivt;
+			ivt = nla_data(vf);
+			err = -EOPNOTSUPP;
+			if (ops->ndo_set_vf_tx_rate)
+				err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+							      ivt->rate);
+			break;
+		}
+		default:
+			err = -EINVAL;
+			break;
+		}
+		if (err)
+			break;
+	}
+	return err;
+}
+
 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		      struct nlattr **tb, char *ifname, int modified)
 {
@@ -991,40 +1075,17 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		write_unlock_bh(&dev_base_lock);
 	}
 
-	if (tb[IFLA_VF_MAC]) {
-		struct ifla_vf_mac *ivm;
-		ivm = nla_data(tb[IFLA_VF_MAC]);
-		err = -EOPNOTSUPP;
-		if (ops->ndo_set_vf_mac)
-			err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
-		if (err < 0)
-			goto errout;
-		modified = 1;
-	}
-
-	if (tb[IFLA_VF_VLAN]) {
-		struct ifla_vf_vlan *ivv;
-		ivv = nla_data(tb[IFLA_VF_VLAN]);
-		err = -EOPNOTSUPP;
-		if (ops->ndo_set_vf_vlan)
-			err = ops->ndo_set_vf_vlan(dev, ivv->vf,
-						   ivv->vlan,
-						   ivv->qos);
-		if (err < 0)
-			goto errout;
-		modified = 1;
-	}
-	err = 0;
-
-	if (tb[IFLA_VF_TX_RATE]) {
-		struct ifla_vf_tx_rate *ivt;
-		ivt = nla_data(tb[IFLA_VF_TX_RATE]);
-		err = -EOPNOTSUPP;
-		if (ops->ndo_set_vf_tx_rate)
-			err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
-		if (err < 0)
-			goto errout;
-		modified = 1;
+	if (tb[IFLA_VFINFO_LIST]) {
+		struct nlattr *attr;
+		int rem;
+		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+			if (nla_type(attr) != IFLA_VF_INFO)
+				goto errout;
+			err = do_setvfinfo(dev, attr);
+			if (err < 0)
+				goto errout;
+			modified = 1;
+		}
 	}
 	err = 0;
......
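On the userspace side, a request that exercises the new do_setvfinfo() path nests attributes the same way the kernel dump does. Below is a minimal, untested sketch using libnl-3; the function name, the interface index, the fixed 6-byte MAC length, and the abbreviated error handling are all assumptions for illustration, not part of this commit.

/* Sketch: set the MAC of VF 0 via RTM_SETLINK using the new nested
 * IFLA_VFINFO_LIST / IFLA_VF_INFO layout (libnl-3, error paths trimmed). */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <sys/socket.h>
#include <string.h>

int set_vf_mac(struct nl_sock *sk, int ifindex, const unsigned char *mac)
{
	struct ifinfomsg ifi = { .ifi_family = AF_UNSPEC, .ifi_index = ifindex };
	struct ifla_vf_mac vf_mac = { .vf = 0 };
	struct nlattr *list, *info;
	struct nl_msg *msg;
	int err;

	memcpy(vf_mac.mac, mac, 6);

	msg = nlmsg_alloc_simple(RTM_SETLINK, NLM_F_REQUEST | NLM_F_ACK);
	if (!msg)
		return -1;
	nlmsg_append(msg, &ifi, sizeof(ifi), NLMSG_ALIGNTO);

	list = nla_nest_start(msg, IFLA_VFINFO_LIST);	/* outer nest */
	info = nla_nest_start(msg, IFLA_VF_INFO);	/* one VF entry */
	if (!list || !info) {
		nlmsg_free(msg);
		return -1;
	}
	nla_put(msg, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
	nla_nest_end(msg, info);
	nla_nest_end(msg, list);

	err = nl_send_auto_complete(sk, msg);
	nlmsg_free(msg);
	return err;
}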
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
@@ -2840,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
 			if (p->md5_desc.tfm)
 				crypto_free_hash(p->md5_desc.tfm);
 			kfree(p);
-			p = NULL;
 		}
 	}
 	free_percpu(pool);
@@ -2938,25 +2937,40 @@ struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ *	tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ *	We use percpu structure, so if we succeed, we exit with preemption
+ *	and BH disabled, to make sure another thread or softirq handling
+ *	wont try to get same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool * __percpu *p;
-	spin_lock_bh(&tcp_md5sig_pool_lock);
+
+	local_bh_disable();
+
+	spin_lock(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock_bh(&tcp_md5sig_pool_lock);
-	return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+	spin_unlock(&tcp_md5sig_pool_lock);
 
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+	if (p)
+		return *per_cpu_ptr(p, smp_processor_id());
 
-void __tcp_put_md5sig_pool(void)
+	local_bh_enable();
+	return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
+
+void tcp_put_md5sig_pool(void)
 {
+	local_bh_enable();
 	tcp_free_md5sig_pool();
 }
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
 
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 			struct tcphdr *th)
......
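The rewrite above is essentially spin_lock_bh() split into its two halves, so that bottom halves stay disabled after the spinlock is dropped on the success path. A sketch of the equivalence (not from this commit):

/* spin_lock_bh()/spin_unlock_bh() bundle BH-disable with the lock:
 *
 *	spin_lock_bh(&l);	==	local_bh_disable();
 *					spin_lock(&l);
 *
 *	spin_unlock_bh(&l);	==	spin_unlock(&l);
 *					local_bh_enable();
 *
 * By taking the two halves separately, tcp_get_md5sig_pool() can release
 * the lock yet return to the caller with BH still disabled, which also
 * makes smp_processor_id() safe to use; tcp_put_md5sig_pool() supplies
 * the deferred local_bh_enable(). */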
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
@@ -148,6 +148,10 @@ void sctp_transport_free(struct sctp_transport *transport)
 	    del_timer(&transport->T3_rtx_timer))
 		sctp_transport_put(transport);
 
+	/* Delete the ICMP proto unreachable timer if it's active. */
+	if (timer_pending(&transport->proto_unreach_timer) &&
+	    del_timer(&transport->proto_unreach_timer))
+		sctp_association_put(transport->asoc);
+
 	sctp_transport_put(transport);
 }
......
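The SCTP fix above follows a common timer/refcount idiom: arming the timer takes a reference that the handler drops when it fires, so if the timer is cancelled before firing, the canceller must drop that reference instead. A generic sketch of the idiom with a hypothetical object type (not SCTP code):

/* del_timer() returns nonzero only if it deactivated a pending timer,
 * i.e. the handler will no longer run and cannot drop the reference the
 * armed timer was holding -- so the canceller drops it. */
struct my_obj {
	struct timer_list timer;
	atomic_t refcnt;
};

static void obj_cancel_timer(struct my_obj *obj)
{
	if (timer_pending(&obj->timer) && del_timer(&obj->timer))
		obj_put(obj);	/* reference taken when the timer was armed */
}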