Commit c649c0e3 authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/wireless/ath/ath5k/phy.c
	drivers/net/wireless/iwlwifi/iwl-agn.c
	drivers/net/wireless/iwlwifi/iwl3945-base.c
parents daebafed 4e2fd555
@@ -175,7 +175,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
 		return -EINVAL;
 	}
 	src = iwb->read;
-	if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
+	if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
 		     (read < src && limit >= src))) {
 		pr_err("isoc write buffer frame reservation violated\n");
 		return -EFAULT;
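Classic fence-post: limit indexes a buffer of BAS_OUTBUFSIZE + BAS_OUTBUFPAD bytes, so the size itself is already out of range and only >= catches it. A minimal userspace illustration (the constant is a stand-in, not the driver's):

#include <stdio.h>

#define BUFSIZE 8			/* stands in for OUTBUFSIZE + PAD */

int main(void)
{
	int limit = BUFSIZE;		/* one past the last valid index */

	if (limit > BUFSIZE)		/* old check: silently passes */
		printf("old check rejects\n");
	if (limit >= BUFSIZE)		/* fixed check: rejects */
		printf("new check rejects limit=%d\n", limit);
	return 0;
}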
......
@@ -1851,8 +1851,17 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 			if (unlikely(!newskb))
 				newskb = skb;
-			else if (skb)
+			else if (skb) {
+				/*
+				 * We need to reset ->data to what it
+				 * was before gfar_new_skb() re-aligned
+				 * it to an RXBUF_ALIGNMENT boundary
+				 * before we put the skb back on the
+				 * recycle list.
+				 */
+				skb->data = skb->head + NET_SKB_PAD;
 				__skb_queue_head(&priv->rx_recycle, skb);
+			}
 		} else {
 			/* Increment the number of packets */
 			dev->stats.rx_packets++;
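The reset matters because the RX path skews ->data for alignment; a recycled skb must come back with its canonical NET_SKB_PAD headroom, or the next alignment pass compounds the offset. A userspace sketch of the pointer arithmetic (constants hypothetical):

#include <stdio.h>

#define NET_SKB_PAD	32
#define RXBUF_ALIGNMENT	64

int main(void)
{
	char head[256];
	char *data = head + NET_SKB_PAD;
	unsigned long off;

	/* What an RX-path alignment step may do to ->data: */
	off = (unsigned long)data % RXBUF_ALIGNMENT;
	if (off)
		data += RXBUF_ALIGNMENT - off;

	/* The fix: a recycled buffer must start from a known offset. */
	data = head + NET_SKB_PAD;
	printf("recycle offset = %td\n", data - head);	/* always 32 */
	return 0;
}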
......
@@ -6467,6 +6467,7 @@ static int airo_get_encode(struct net_device *dev,
 {
 	struct airo_info *local = dev->ml_priv;
 	int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+	int wep_key_len;
 	u8 buf[16];
 
 	if (!local->wep_capable)
@@ -6500,11 +6501,13 @@ static int airo_get_encode(struct net_device *dev,
 	dwrq->flags |= index + 1;
 
 	/* Copy the key to the user buffer */
-	dwrq->length = get_wep_key(local, index, &buf[0], sizeof(buf));
-	if (dwrq->length != -1)
-		memcpy(extra, buf, dwrq->length);
-	else
+	wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf));
+	if (wep_key_len < 0) {
 		dwrq->length = 0;
+	} else {
+		dwrq->length = wep_key_len;
+		memcpy(extra, buf, dwrq->length);
+	}
 
 	return 0;
 }
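Worth spelling out why the original test could never fire: the length field in struct iw_point is a 16-bit unsigned quantity, so storing get_wep_key()'s -1 into it yields 65535, and "!= -1" is then always true, handing memcpy() a huge length. A minimal userspace sketch of the bug class (stub names are hypothetical):

#include <stdio.h>

struct point { unsigned short length; };	/* stand-in for iw_point */

static int get_key_stub(char *buf, unsigned int n)
{
	(void)buf; (void)n;
	return -1;				/* simulate "no such key" */
}

int main(void)
{
	struct point p;
	char buf[16];

	p.length = get_key_stub(buf, sizeof(buf));	/* -1 becomes 65535 */
	if (p.length != -1)				/* 65535 != -1: always true */
		printf("old check would memcpy %u bytes\n", p.length);
	return 0;
}

Checking the signed return value first, as the fix does, keeps the error from ever reaching the unsigned field.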
@@ -6617,7 +6620,7 @@ static int airo_get_encodeext(struct net_device *dev,
 	struct airo_info *local = dev->ml_priv;
 	struct iw_point *encoding = &wrqu->encoding;
 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
-	int idx, max_key_len;
+	int idx, max_key_len, wep_key_len;
 	u8 buf[16];
 
 	if (!local->wep_capable)
@@ -6661,11 +6664,13 @@ static int airo_get_encodeext(struct net_device *dev,
 
 	memset(extra, 0, 16);
 
 	/* Copy the key to the user buffer */
-	ext->key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
-	if (ext->key_len != -1)
-		memcpy(extra, buf, ext->key_len);
-	else
+	wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
+	if (wep_key_len < 0) {
 		ext->key_len = 0;
+	} else {
+		ext->key_len = wep_key_len;
+		memcpy(extra, buf, ext->key_len);
+	}
 
 	return 0;
 }
......
@@ -26,7 +26,7 @@
 \*****************************/
 
 #include <linux/pci.h>		/* To determine if a card is pci-e */
-#include <linux/bitops.h>	/* For get_bitmask_order */
+#include <linux/log2.h>
 #include "ath5k.h"
 #include "reg.h"
 #include "base.h"
@@ -68,10 +68,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
 
 	/* Get exponent
 	 * ALGO: coef_exp = 14 - highest set bit position */
-	coef_exp = get_bitmask_order(coef_scaled);
+	coef_exp = ilog2(coef_scaled);
 
 	/* Doesn't make sense if it's zero*/
-	if (!coef_exp)
+	if (!coef_scaled || !coef_exp)
 		return -EINVAL;
 
 	/* Note: we've shifted coef_scaled by 24 */
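The include swap tracks the helper swap. For x > 0, get_bitmask_order(x) is fls(x), one more than the bit position the "ALGO: coef_exp = 14 - highest set bit position" comment asks for, while ilog2(x) returns the position itself; and since ilog2(0) is undefined, the zero case is now caught explicitly. A userspace sketch of the difference, using compiler builtins as stand-ins:

#include <stdio.h>

static int fls32(unsigned int x)	/* get_bitmask_order()-style */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int ilog2_32(unsigned int x)	/* ilog2()-style; x == 0 undefined */
{
	return 32 - __builtin_clz(x) - 1;
}

int main(void)
{
	unsigned int coef_scaled = 5U << 24;	/* highest set bit: 26 */

	printf("get_bitmask_order-style: %d\n", fls32(coef_scaled));	/* 27 */
	printf("ilog2-style:             %d\n", ilog2_32(coef_scaled));	/* 26 */
	return 0;
}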
......
@@ -46,7 +46,7 @@
 #include "iwl-6000-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 1
+#define IWL5000_UCODE_API_MAX 2
 #define IWL5150_UCODE_API_MAX 2
 
 /* Lowest firmware API version supported */
......
@@ -227,9 +227,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
 	/* The HW is no longer scanning */
 	clear_bit(STATUS_SCAN_HW, &priv->status);
 
-	/* The scan completion notification came in, so kill that timer... */
-	cancel_delayed_work(&priv->scan_check);
-
 	IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
 		       (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
 						"2.4" : "5.2",
@@ -592,6 +589,8 @@ static void iwl_bg_request_scan(struct work_struct *data)
 
 	mutex_lock(&priv->mutex);
 
+	cancel_delayed_work(&priv->scan_check);
+
 	if (!iwl_is_ready(priv)) {
 		IWL_WARN(priv, "request scan called when driver not ready.\n");
 		goto done;
@@ -819,6 +818,8 @@ void iwl_bg_scan_completed(struct work_struct *work)
 
 	IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
 
+	cancel_delayed_work(&priv->scan_check);
+
 	ieee80211_scan_completed(priv->hw, false);
 
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
......
@@ -2937,6 +2937,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
 
 	mutex_lock(&priv->mutex);
 
+	cancel_delayed_work(&priv->scan_check);
+
 	if (!iwl_is_ready(priv)) {
 		IWL_WARN(priv, "request scan called when driver not ready.\n");
 		goto done;
......
@@ -138,7 +138,7 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
 	if (cipher == CIPHER_TKIP_NO_MIC)
 		cipher = CIPHER_TKIP;
 
-	if (cipher == CIPHER_NONE || cipher > CIPHER_MAX)
+	if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX)
 		return;
 
 	/* Remove CIPHER_NONE index */
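With the stats indexed by (cipher - 1), as the "Remove CIPHER_NONE index" comment implies, a value equal to CIPHER_MAX lands one slot past the end of an array sized for the real ciphers, so the boundary itself must be rejected. A toy bounds check (enum values hypothetical):

#include <stdio.h>

enum cipher { CIPHER_NONE, CIPHER_WEP64, CIPHER_TKIP, CIPHER_MAX };

int main(void)
{
	int counters[CIPHER_MAX - 1] = {0};	/* one slot per real cipher */
	enum cipher cipher = CIPHER_MAX;

	if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX) {
		printf("rejected cipher %d\n", cipher);
		return 0;
	}
	counters[cipher - 1]++;			/* safe after the >= check */
	printf("counted cipher %d\n", cipher);
	return 0;
}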
......
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->cflows) {
 		/* let go of the SAs if we have them */
 		int i = 0;
-		for (;  i < pkt_dev->nflows; i++){
+		for (;  i < pkt_dev->cflows; i++) {
 			struct xfrm_state *x = pkt_dev->flows[i].x;
 			if (x) {
 				xfrm_state_put(x);
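pkt_dev->flows[] holds cflows configured entries, of which nflows roughly counts the ones touched so far; since an SA may sit on any configured slot, releasing only the first nflows can leak the rest. A userspace miniature of the corrected bound:

#include <stdlib.h>

struct flow { void *x; };	/* x stands in for an attached SA */

int main(void)
{
	int cflows = 4;		/* configured flows */
	struct flow *flows = calloc(cflows, sizeof(*flows));

	for (int i = 0; i < cflows; i++)
		flows[i].x = malloc(16);

	/* Old bound (i < nflows, e.g. 2) left half of these allocated. */
	for (int i = 0; i < cflows; i++)
		free(flows[i].x);	/* fixed bound: free them all */

	free(flows);
	return 0;
}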
......
@@ -2291,7 +2291,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
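The extra !st->frag_data condition matters when a caller seeks back into the linear header while a fragment is still mapped: returning header bytes at that point would skip the unmap step further down. A toy single-fragment reader showing where the guard sits (hypothetical names, no kmap involved):

#include <stdio.h>
#include <stddef.h>

struct seq_state {
	const char *head;  size_t headlen;
	const char *frag;  size_t fraglen;
	const char *frag_data;		/* non-NULL while the frag is "mapped" */
};

static size_t seq_read(size_t off, const char **data, struct seq_state *st)
{
	/* The fix: serve head bytes only with no mapping outstanding,
	 * otherwise fall through so the fragment gets finished first. */
	if (off < st->headlen && !st->frag_data) {
		*data = st->head + off;
		return st->headlen - off;
	}
	if (off < st->headlen + st->fraglen) {
		st->frag_data = st->frag;	/* "map" the fragment */
		*data = st->frag_data + (off - st->headlen);
		return st->headlen + st->fraglen - off;
	}
	return 0;
}

int main(void)
{
	struct seq_state st = { "head", 4, "frag", 4, NULL };
	const char *p;
	size_t n = seq_read(5, &p, &st);	/* lands in the fragment */

	printf("%zu bytes at '%c'\n", n, *p);
	return 0;
}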
......
@@ -408,7 +408,7 @@ config INET_XFRM_MODE_BEET
 
 config INET_LRO
-	tristate "Large Receive Offload (ipv4/tcp)"
+	bool "Large Receive Offload (ipv4/tcp)"
 	default y
 	---help---
 	  Support for Large Receive Offload (ipv4/tcp).
......
@@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key)
 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 {
 	int wasfull;
-	t_key cindex, key = tn->key;
+	t_key cindex, key;
 	struct tnode *tp;
 
+	preempt_disable();
+	key = tn->key;
+
 	while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
 		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
 		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
@@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 
 	if (IS_TNODE(tn))
 		tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
+	preempt_enable();
 	return (struct node *)tn;
 }
......
@@ -784,8 +784,8 @@ static void rt_check_expire(void)
 {
 	static unsigned int rover;
 	unsigned int i = rover, goal;
-	struct rtable *rth, **rthp;
-	unsigned long length = 0, samples = 0;
+	struct rtable *rth, *aux, **rthp;
+	unsigned long samples = 0;
 	unsigned long sum = 0, sum2 = 0;
 	u64 mult;
@@ -795,9 +795,9 @@ static void rt_check_expire(void)
 	goal = (unsigned int)mult;
 	if (goal > rt_hash_mask)
 		goal = rt_hash_mask + 1;
-	length = 0;
 	for (; goal > 0; goal--) {
 		unsigned long tmo = ip_rt_gc_timeout;
+		unsigned long length;
 
 		i = (i + 1) & rt_hash_mask;
 		rthp = &rt_hash_table[i].chain;
@@ -809,8 +809,10 @@ static void rt_check_expire(void)
 		if (*rthp == NULL)
 			continue;
+		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
 		while ((rth = *rthp) != NULL) {
+			prefetch(rth->u.dst.rt_next);
 			if (rt_is_expired(rth)) {
 				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
@@ -819,33 +821,30 @@ static void rt_check_expire(void)
 			if (rth->u.dst.expires) {
 				/* Entry is expired even if it is in use */
 				if (time_before_eq(jiffies, rth->u.dst.expires)) {
+nofree:
 					tmo >>= 1;
 					rthp = &rth->u.dst.rt_next;
 					/*
-					 * Only bump our length if the hash
-					 * inputs on entries n and n+1 are not
-					 * the same, we only count entries on
+					 * We only count entries on
 					 * a chain with equal hash inputs once
 					 * so that entries for different QOS
 					 * levels, and other non-hash input
 					 * attributes don't unfairly skew
 					 * the length computation
 					 */
-					if ((*rthp == NULL) ||
-					    !compare_hash_inputs(&(*rthp)->fl,
-								 &rth->fl))
-						length += ONE;
+					for (aux = rt_hash_table[i].chain;;) {
+						if (aux == rth) {
+							length += ONE;
+							break;
+						}
+						if (compare_hash_inputs(&aux->fl, &rth->fl))
+							break;
+						aux = aux->u.dst.rt_next;
+					}
 					continue;
 				}
-			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
-				tmo >>= 1;
-				rthp = &rth->u.dst.rt_next;
-				if ((*rthp == NULL) ||
-				    !compare_hash_inputs(&(*rthp)->fl,
-							 &rth->fl))
-					length += ONE;
-				continue;
-			}
+			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+				goto nofree;
 
 			/* Cleanup aged off entries. */
 			*rthp = rth->u.dst.rt_next;
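The new loop replaces the fragile "compare with the next entry" heuristic: an entry now counts toward the chain length only if no entry nearer the head shares its hash inputs, so each group of same-input routes contributes exactly one. A standalone sketch of that counting rule:

#include <stdio.h>

struct ent { int key; };	/* key stands in for the hash inputs */

static int chain_length(const struct ent *e, int n)
{
	int len = 0;

	for (int i = 0; i < n; i++) {
		int j;

		for (j = 0; j < i; j++)
			if (e[j].key == e[i].key)
				break;	/* an earlier equivalent exists */
		if (j == i)
			len++;		/* first of its group: count it */
	}
	return len;
}

int main(void)
{
	struct ent chain[] = { {1}, {1}, {2}, {1}, {3} };

	printf("length = %d\n", chain_length(chain, 5));	/* prints 3 */
	return 0;
}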
@@ -1068,7 +1067,6 @@ out:	return 0;
 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 {
 	struct rtable	*rth, **rthp;
-	struct rtable	*rthi;
 	unsigned long	now;
 	struct rtable *cand, **candp;
 	u32 		min_score;
@@ -1088,7 +1086,6 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 	}
 
 	rthp = &rt_hash_table[hash].chain;
-	rthi = NULL;
 
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	while ((rth = *rthp) != NULL) {
@@ -1134,17 +1131,6 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 
 		chain_length++;
 		rthp = &rth->u.dst.rt_next;
-
-		/*
-		 * check to see if the next entry in the chain
-		 * contains the same hash input values as rt. If it does
-		 * This is where we will insert into the list, instead of
-		 * at the head. This groups entries that differ by aspects not
-		 * relvant to the hash function together, which we use to adjust
-		 * our chain length
-		 */
-		if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
-			rthi = rth;
 	}
 
 	if (cand) {
@@ -1205,10 +1191,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 		}
 	}
 
-	if (rthi)
-		rt->u.dst.rt_next = rthi->u.dst.rt_next;
-	else
-		rt->u.dst.rt_next = rt_hash_table[hash].chain;
+	rt->u.dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
 	if (rt->u.dst.rt_next) {
@@ -1224,10 +1207,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 	 * previous writes to rt are comitted to memory
 	 * before making rt visible to other CPUS.
 	 */
-	if (rthi)
-		rcu_assign_pointer(rthi->u.dst.rt_next, rt);
-	else
-		rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 	*rp = rt;
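With the rthi splice point gone, insertion is always at the chain head, published with rcu_assign_pointer() so concurrent readers never see a half-initialised entry. A userspace analogue of that publish ordering, using C11 release semantics in place of rcu_assign_pointer():

#include <stdatomic.h>
#include <stdlib.h>

struct node {
	int data;
	struct node *next;
};

static _Atomic(struct node *) chain;	/* stand-in for the hash chain head */

static void publish(int data)
{
	struct node *n = malloc(sizeof(*n));

	n->data = data;
	/* New entry always goes at the head now; no splice point. */
	n->next = atomic_load_explicit(&chain, memory_order_relaxed);
	/* Readers that observe the new head also observe n's fields. */
	atomic_store_explicit(&chain, n, memory_order_release);
}

int main(void)
{
	publish(1);
	publish(2);
	return 0;	/* demo only; nodes intentionally not freed */
}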
......
@@ -137,6 +137,7 @@ static struct rt6_info ip6_null_entry_template = {
 		}
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol	= RTPROT_KERNEL,
 	.rt6i_metric	= ~(u32) 0,
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
@@ -159,6 +160,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
 		}
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol	= RTPROT_KERNEL,
 	.rt6i_metric	= ~(u32) 0,
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
@@ -176,6 +178,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 		}
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol	= RTPROT_KERNEL,
 	.rt6i_metric	= ~(u32) 0,
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
......
@@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
 		/* not yet present - create a candidate for a new connection
 		 * and then redo the check */
 		conn = rxrpc_alloc_connection(gfp);
-		if (IS_ERR(conn)) {
-			_leave(" = %ld", PTR_ERR(conn));
-			return PTR_ERR(conn);
+		if (!conn) {
+			_leave(" = -ENOMEM");
+			return -ENOMEM;
 		}
 
 		conn->trans = trans;
@@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
 	/* not yet present - create a candidate for a new connection and then
 	 * redo the check */
 	candidate = rxrpc_alloc_connection(gfp);
-	if (IS_ERR(candidate)) {
-		_leave(" = %ld", PTR_ERR(candidate));
-		return PTR_ERR(candidate);
+	if (!candidate) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
 	}
 
 	candidate->trans = trans;
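rxrpc_alloc_connection() is a kzalloc-style allocator that returns NULL on failure, but both call sites tested the result with IS_ERR(), which is false for NULL, so allocation failures sailed straight through. A small demo of the mismatch (the IS_ERR stand-in mirrors the kernel's ERR_PTR convention):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static void *alloc_conn(void)
{
	return NULL;			/* kzalloc-style failure */
}

int main(void)
{
	void *conn = alloc_conn();

	if (IS_ERR(conn))		/* never true for NULL */
		printf("caught by IS_ERR\n");
	if (!conn)			/* the correct check */
		printf("caught by NULL check: %d\n", -ENOMEM);
	return 0;
}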
......
@@ -1680,6 +1680,13 @@ static int regulatory_hint_core(const char *alpha2)
 
 	queue_regulatory_request(request);
 
+	/*
+	 * This ensures last_request is populated once modules
+	 * come swinging in and calling regulatory hints and
+	 * wiphy_apply_custom_regulatory().
+	 */
+	flush_scheduled_work();
+
 	return 0;
 }
......
@@ -820,6 +820,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
 			err = -EFAULT;
 			goto out;
 		}
+
+		if (cmd == SIOCSIWENCODEEXT) {
+			struct iw_encode_ext *ee = (void *) extra;
+
+			if (iwp->length < sizeof(*ee) + ee->key_len)
+				return -EFAULT;
+		}
 	}
 
 	err = handler(dev, info, (union iwreq_data *) iwp, extra);
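The added check is the usual variable-length-request pattern: the user-supplied length must cover the fixed header plus the key_len the header itself declares, before anything trusts the key bytes. A userspace sketch of the same validation (struct layout simplified):

#include <stdio.h>
#include <stdlib.h>

struct encode_ext {		/* stand-in for struct iw_encode_ext */
	unsigned short key_len;
	char key[];
};

static int validate(const void *buf, size_t length)
{
	const struct encode_ext *ee = buf;

	if (length < sizeof(*ee))
		return -1;	/* header itself truncated */
	if (length < sizeof(*ee) + ee->key_len)
		return -1;	/* declared key overruns the buffer */
	return 0;
}

int main(void)
{
	struct encode_ext *ee = malloc(sizeof(*ee) + 4);

	ee->key_len = 16;	/* claims more key than was supplied */
	printf("valid: %d\n", validate(ee, sizeof(*ee) + 4));	/* -1 */
	free(ee);
	return 0;
}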
......