Commit 7a49efae authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
  netns: Fix crash by making igmp per namespace
  bnx2x: Version update
  bnx2x: Checkpatch compliance
  bnx2x: Spelling mistakes
  bnx2x: Minor code improvements
  bnx2x: Driver info
  bnx2x: 1G LED does not turn off
  bnx2x: 8073 PHY changes
  bnx2x: Change GPIO for any port
  bnx2x: Pause settings
  bnx2x: Link order with external PHY
  bnx2x: No LRO without Rx checksum
  bnx2x: Wrong structure size
  bnx2x: WoL capability
  bnx2x: Clearing MAC addresses filters
  bnx2x: Delay in while loops
  bnx2x: PBA Table Page Alignment Workaround
  bnx2x: Self-test false positive
  bnx2x: Memory allocation
  bnx2x: HW attention lock
  ...
parents 0ff82850 877acedc
......@@ -3076,6 +3076,7 @@ M: horms@verge.net.au
P: Julian Anastasov
M: ja@ssi.bg
L: netdev@vger.kernel.org
L: lvs-devel@vger.kernel.org
S: Maintained
NFS, SUNRPC, AND LOCKD CLIENTS
......
......@@ -40,20 +40,20 @@
#define DP(__mask, __fmt, __args...) do { \
if (bp->msglevel & (__mask)) \
printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
bp->dev?(bp->dev->name):"?", ##__args); \
bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* errors debug print */
#define BNX2X_DBG_ERR(__fmt, __args...) do { \
if (bp->msglevel & NETIF_MSG_PROBE) \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
bp->dev?(bp->dev->name):"?", ##__args); \
bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* for errors (never masked) */
#define BNX2X_ERR(__fmt, __args...) do { \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
bp->dev?(bp->dev->name):"?", ##__args); \
bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* before we have a dev->name use dev_info() */
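The DP / BNX2X_DBG_ERR / BNX2X_ERR macros above print through a ternary so that a message emitted before the netdev is registered falls back to "?" instead of dereferencing a NULL name; the hunk itself only adds spaces around the operator. A minimal user-space sketch of the same guard (hypothetical struct and macro names, GNU-style ##__VA_ARGS__ assumed):

#include <stdio.h>

struct fake_dev { const char *name; };
struct fake_bp { struct fake_dev *dev; };

/* Fall back to "?" while bp->dev has not been set up yet. */
#define DBG(bp, fmt, ...) \
	printf("[%s:%d(%s)] " fmt, __func__, __LINE__, \
	       (bp)->dev ? (bp)->dev->name : "?", ##__VA_ARGS__)

int main(void)
{
	struct fake_dev d = { .name = "eth0" };
	struct fake_bp bp = { .dev = NULL };

	DBG(&bp, "probing\n");		/* prints "?", no netdev yet */
	bp.dev = &d;
	DBG(&bp, "link up\n");		/* prints "eth0" */
	return 0;
}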
......@@ -120,16 +120,8 @@
#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
#define NIG_WR(reg, val) REG_WR(bp, reg, val)
#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val)
#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
#define for_each_nondefault_queue(bp, var) \
for (var = 1; var < bp->num_queues; var++)
#define is_multi(bp) (bp->num_queues > 1)
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
/* fast path */
......@@ -163,7 +155,7 @@ struct sw_rx_page {
#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
/* RX_SGE_CNT is promissed to be a power of 2 */
/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK (RX_SGE_CNT - 1)
#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE (NUM_RX_SGE - 1)
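RX_SGE_CNT being a power of two is what makes RX_SGE_MASK usable: wrapping a ring index is a single AND rather than a modulo. A stand-alone sketch of that indexing trick (the page and element sizes below are illustrative, not the driver's real values):

#include <stdio.h>

#define PAGE_SZ		4096
#define SGE_SIZE	16			/* stand-in for sizeof(struct eth_rx_sge) */
#define SGE_CNT		(PAGE_SZ / SGE_SIZE)	/* 256, a power of two */
#define SGE_MASK	(SGE_CNT - 1)		/* 0xff */

/* Advance a ring index with wrap-around; only valid because SGE_CNT is 2^n. */
static unsigned int next_sge_idx(unsigned int idx)
{
	return (idx + 1) & SGE_MASK;
}

int main(void)
{
	unsigned int last = SGE_CNT - 1;

	printf("index %u wraps to %u\n", last, next_sge_idx(last));
	return 0;
}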
......@@ -258,8 +250,7 @@ struct bnx2x_fastpath {
unsigned long tx_pkt,
rx_pkt,
rx_calls,
rx_alloc_failed;
rx_calls;
/* TPA related */
struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
......@@ -275,6 +266,15 @@ struct bnx2x_fastpath {
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
#define BNX2X_HAS_TX_WORK(fp) \
((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
(fp->tx_pkt_prod != fp->tx_pkt_cons))
#define BNX2X_HAS_RX_WORK(fp) \
(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
/* MC hsi */
#define MAX_FETCH_BD 13 /* HW max BDs per packet */
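The BNX2X_HAS_TX_WORK / BNX2X_HAS_RX_WORK / BNX2X_HAS_WORK macros added above are plain producer/consumer comparisons: the poll path keeps running while either ring still has unprocessed entries. A toy stand-alone version of the same test, with ordinary integers in place of status-block reads (all names invented):

#include <stdbool.h>
#include <stdio.h>

struct fake_fp {
	unsigned short tx_pkt_prod, tx_pkt_cons;	/* TX ring producer/consumer */
	unsigned short rx_comp_prod, rx_comp_cons;	/* RX completion producer/consumer */
};

static bool has_tx_work(const struct fake_fp *fp)
{
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static bool has_rx_work(const struct fake_fp *fp)
{
	return fp->rx_comp_cons != fp->rx_comp_prod;
}

int main(void)
{
	struct fake_fp fp = { .tx_pkt_prod = 3, .tx_pkt_cons = 3,
			      .rx_comp_prod = 7, .rx_comp_cons = 5 };

	printf("has work: %d\n", has_rx_work(&fp) || has_tx_work(&fp));
	return 0;
}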
......@@ -317,7 +317,7 @@ struct bnx2x_fastpath {
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
/* This is needed for determening of last_max */
/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
#define __SGE_MASK_SET_BIT(el, bit) \
......@@ -386,20 +386,28 @@ struct bnx2x_fastpath {
#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
(TPA_TYPE_START | TPA_TYPE_END))
#define BNX2X_RX_SUM_OK(cqe) \
(!(cqe->fast_path_cqe.status_flags & \
(ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
#define BNX2X_IP_CSUM_ERR(cqe) \
(!((cqe)->fast_path_cqe.status_flags & \
ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
((cqe)->fast_path_cqe.type_error_flags & \
ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
#define BNX2X_L4_CSUM_ERR(cqe) \
(!((cqe)->fast_path_cqe.status_flags & \
ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
((cqe)->fast_path_cqe.type_error_flags & \
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
#define BNX2X_RX_CSUM_OK(cqe) \
(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
#define BNX2X_RX_SUM_FIX(cqe) \
((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
(1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
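The BNX2X_IP_CSUM_ERR / BNX2X_L4_CSUM_ERR macros introduced above only report an error when the hardware actually attempted validation (the *_XSUM_NO_VALIDATION flag is clear) and flagged a bad checksum; a skipped check is not an error. A small stand-alone sketch of that two-flag test, with invented bit values rather than the real CQE layout:

#include <stdbool.h>
#include <stdio.h>

#define IP_XSUM_NO_VALIDATION	0x01	/* illustrative: HW skipped the IP checksum */
#define IP_BAD_XSUM		0x02	/* illustrative: HW saw a bad IP checksum */

struct fake_cqe {
	unsigned char status_flags;
	unsigned char type_error_flags;
};

/* An error only counts when validation ran AND the checksum was bad. */
static bool ip_csum_err(const struct fake_cqe *cqe)
{
	return !(cqe->status_flags & IP_XSUM_NO_VALIDATION) &&
	       (cqe->type_error_flags & IP_BAD_XSUM);
}

int main(void)
{
	struct fake_cqe skipped = { .status_flags = IP_XSUM_NO_VALIDATION,
				    .type_error_flags = IP_BAD_XSUM };
	struct fake_cqe bad = { .status_flags = 0,
				.type_error_flags = IP_BAD_XSUM };

	printf("validation skipped: %d, really bad: %d\n",
	       ip_csum_err(&skipped), ip_csum_err(&bad));
	return 0;
}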
......@@ -647,6 +655,8 @@ struct bnx2x_eth_stats {
u32 brb_drop_hi;
u32 brb_drop_lo;
u32 brb_truncate_hi;
u32 brb_truncate_lo;
u32 jabber_packets_received;
......@@ -663,6 +673,9 @@ struct bnx2x_eth_stats {
u32 mac_discard;
u32 driver_xoff;
u32 rx_err_discard_pkt;
u32 rx_skb_alloc_failed;
u32 hw_csum_err;
};
#define STATS_OFFSET32(stat_name) \
......@@ -753,7 +766,6 @@ struct bnx2x {
u16 def_att_idx;
u32 attn_state;
struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
u32 aeu_mask;
u32 nig_mask;
/* slow path ring */
......@@ -772,7 +784,7 @@ struct bnx2x {
u8 stats_pending;
u8 set_mac_pending;
/* End of fileds used in the performance code paths */
/* End of fields used in the performance code paths */
int panic;
int msglevel;
......@@ -794,9 +806,6 @@ struct bnx2x {
#define BP_FUNC(bp) (bp->func)
#define BP_E1HVN(bp) (bp->func >> 1)
#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
/* assorted E1HVN */
#define IS_E1HMF(bp) (bp->e1hmf != 0)
#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
int pm_cap;
int pcie_cap;
......@@ -821,6 +830,7 @@ struct bnx2x {
u32 mf_config;
u16 e1hov;
u8 e1hmf;
#define IS_E1HMF(bp) (bp->e1hmf != 0)
u8 wol;
......@@ -836,7 +846,6 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
u32 stats_ticks;
u32 lin_cnt;
int state;
......@@ -852,6 +861,7 @@ struct bnx2x {
#define BNX2X_STATE_ERROR 0xf000
int num_queues;
#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
u32 rx_mode;
#define BNX2X_RX_MODE_NONE 0
......@@ -902,10 +912,17 @@ struct bnx2x {
};
#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
#define for_each_nondefault_queue(bp, var) \
for (var = 1; var < bp->num_queues; var++)
#define is_multi(bp) (bp->num_queues > 1)
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
......@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED_SHIFT 16
#define BNX2X_NUM_STATS 39
#define BNX2X_NUM_STATS 42
#define BNX2X_NUM_TESTS 8
#define BNX2X_MAC_LOOPBACK 0
......@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* resolution of the rate shaping timer - 100 usec */
#define RS_PERIODIC_TIMEOUT_USEC 100
/* resolution of fairness algorithm in usecs -
coefficient for clauclating the actuall t fair */
coefficient for calculating the actual t fair */
#define T_FAIR_COEF 10000000
/* number of bytes in single QM arbitration cycle -
coeffiecnt for calculating the fairness timer */
coefficient for calculating the fairness timer */
#define QM_ARB_BYTES 40000
#define FAIR_MEM 2
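T_FAIR_COEF and QM_ARB_BYTES are consumed by the driver's rate-shaping and fairness setup elsewhere; the exact formulas live in that code. Purely as an illustration of the unit arithmetic such coefficients imply (assuming, hypothetically, that the fair period is the coefficient divided by the line rate in Mbps, which is not taken from the driver), the numbers work out like this:

#include <stdio.h>

#define T_FAIR_COEF_X	10000000	/* copy of the value above, for the sketch only */

int main(void)
{
	unsigned int line_speed_mbps = 10000;	/* 10G link, illustrative */

	/* Hypothetical derivation, not the driver's actual CMNG code. */
	unsigned int t_fair = T_FAIR_COEF_X / line_speed_mbps;

	printf("t_fair ~ %u units at %u Mbps\n", t_fair, line_speed_mbps);
	return 0;
}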
......
......@@ -1268,7 +1268,7 @@ struct doorbell {
/*
* IGU driver acknowlegement register
* IGU driver acknowledgement register
*/
struct igu_ack_register {
#if defined(__BIG_ENDIAN)
......@@ -1882,7 +1882,7 @@ struct timers_block_context {
};
/*
* structure for easy accessability to assembler
* structure for easy accessibility to assembler
*/
struct eth_tx_bd_flags {
u8 as_bitfield;
......@@ -2044,7 +2044,7 @@ struct eth_context {
/*
* ethernet doorbell
* Ethernet doorbell
*/
struct eth_tx_doorbell {
#if defined(__BIG_ENDIAN)
......@@ -2256,7 +2256,7 @@ struct ramrod_data {
};
/*
* union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
* union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
*/
union eth_ramrod_data {
struct ramrod_data general;
......@@ -2330,7 +2330,7 @@ struct spe_hdr {
};
/*
* ethernet slow path element
* Ethernet slow path element
*/
union eth_specific_data {
u8 protocol_data[8];
......@@ -2343,7 +2343,7 @@ union eth_specific_data {
};
/*
* ethernet slow path element
* Ethernet slow path element
*/
struct eth_spe {
struct spe_hdr hdr;
......@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {
/*
* common flag to indicate existance of TPA.
* common flag to indicate existence of TPA.
*/
struct tstorm_eth_tpa_exist {
#if defined(__BIG_ENDIAN)
......@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
};
/*
* Eth statistics query sturcture for the eth_stats_quesry ramrod
* Eth statistics query structure for the eth_stats_query ramrod
*/
struct eth_stats_query {
struct xstorm_common_stats xstorm_common;
......
......@@ -72,26 +72,26 @@
struct raw_op {
u32 op :8;
u32 offset :24;
u32 op:8;
u32 offset:24;
u32 raw_data;
};
struct op_read {
u32 op :8;
u32 offset :24;
u32 op:8;
u32 offset:24;
u32 pad;
};
struct op_write {
u32 op :8;
u32 offset :24;
u32 op:8;
u32 offset:24;
u32 val;
};
struct op_string_write {
u32 op :8;
u32 offset :24;
u32 op:8;
u32 offset:24;
#ifdef __LITTLE_ENDIAN
u16 data_off;
u16 data_len;
......@@ -102,8 +102,8 @@ struct op_string_write {
};
struct op_zero {
u32 op :8;
u32 offset :24;
u32 op:8;
u32 offset:24;
u32 len;
};
......@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
/*********************************************************
There are different blobs for each PRAM section.
In addition, each blob write operation is divided into a few operations
in order to decrease the amount of phys. contigious buffer needed.
in order to decrease the amount of phys. contiguous buffer needed.
Thus, when we select a blob the address may be with some offset
from the beginning of PRAM section.
The same holds for the INT_TABLE sections.
......@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
len = op->str_wr.data_len;
data = data_base + op->str_wr.data_off;
/* carefull! it must be in order */
/* careful! it must be in order */
if (unlikely(op_type > OP_WB)) {
/* If E1 only */
......@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
return crc_res;
}
/* regiesers addresses are not in order
/* registers addresses are not in order
so these arrays help simplify the code */
static const int cm_start[E1H_FUNC_MAX][9] = {
{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
......
......@@ -55,14 +55,17 @@ struct link_params {
#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS_10 3
#define LOOPBACK_EXT_PHY 4
#define LOOPBACK_EXT 5
u16 req_duplex;
u16 req_flow_ctrl;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
u16 req_line_speed; /* Also determine AutoNeg */
/* Device parameters */
u8 mac_addr[6];
u16 mtu;
/* shmem parameters */
......@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
u8 phy_addr, u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordinaly */
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
......@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to
to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off.*/
u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
u16 hw_led_mode, u32 chip_id);
......@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
otherwise link is down*/
u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
/* One-time initialization for external phy after power up */
u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
#endif /* BNX2X_LINK_H */
......@@ -901,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
!__pskb_pull_tail(skb, len-skb_headlen(skb)))
!__pskb_pull_tail(skb, len - skb_headlen(skb)))
return NULL;
skb->len -= len;
return skb->data += len;
......@@ -918,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
return 1;
if (unlikely(len > skb->len))
return 0;
return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
/**
......@@ -1321,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
unsigned int size = skb->len;
if (likely(size >= len))
return 0;
return skb_pad(skb, len-size);
return skb_pad(skb, len - size);
}
static inline int skb_add_data(struct sk_buff *skb,
......
......@@ -38,11 +38,6 @@ struct route_info {
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
extern struct rt6_info *ip6_prohibit_entry;
extern struct rt6_info *ip6_blk_hole_entry;
#endif
extern void ip6_route_input(struct sk_buff *skb);
extern struct dst_entry * ip6_route_output(struct net *net,
......@@ -118,7 +113,6 @@ extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
extern void rt6_ifdown(struct net *net, struct net_device *dev);
extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
extern rwlock_t rt6_lock;
/*
* Store a destination cache entry in a socket
......
......@@ -140,8 +140,24 @@ struct ip_vs_seq {
/*
* IPVS statistics object
* IPVS statistics objects
*/
struct ip_vs_estimator {
struct list_head list;
u64 last_inbytes;
u64 last_outbytes;
u32 last_conns;
u32 last_inpkts;
u32 last_outpkts;
u32 cps;
u32 inpps;
u32 outpps;
u32 inbps;
u32 outbps;
};
struct ip_vs_stats
{
__u32 conns; /* connections scheduled */
......@@ -156,7 +172,15 @@ struct ip_vs_stats
__u32 inbps; /* current in byte rate */
__u32 outbps; /* current out byte rate */
/*
* Don't add anything before the lock, because we use memcpy() to copy
* the members before the lock to struct ip_vs_stats_user in
* ip_vs_ctl.c.
*/
spinlock_t lock; /* spin lock */
struct ip_vs_estimator est; /* estimator */
};
struct dst_entry;
......@@ -440,7 +464,7 @@ struct ip_vs_app
*/
extern const char *ip_vs_proto_name(unsigned proto);
extern void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0]))
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
#define IP_VS_APP_TYPE_FTP 1
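The IP_VS_INIT_HASH_TABLE change replaces the open-coded sizeof(t)/sizeof(t[0]) with the kernel's ARRAY_SIZE() helper (which in the kernel additionally refuses to compile when handed a pointer rather than an array). A user-space approximation of the idiom, without that extra type check:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static void init_hash_table(int *table, size_t rows)
{
	for (size_t i = 0; i < rows; i++)
		table[i] = 0;
}

int main(void)
{
	int buckets[256];

	/* The element count is derived from the array type, never hard-coded. */
	init_hash_table(buckets, ARRAY_SIZE(buckets));
	printf("%zu buckets initialised\n", ARRAY_SIZE(buckets));
	return 0;
}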
......@@ -620,7 +644,7 @@ extern int sysctl_ip_vs_expire_quiescent_template;
extern int sysctl_ip_vs_sync_threshold[2];
extern int sysctl_ip_vs_nat_icmp_send;
extern struct ip_vs_stats ip_vs_stats;
extern struct ctl_path net_vs_ctl_path[];
extern const struct ctl_path net_vs_ctl_path[];
extern struct ip_vs_service *
ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport);
......@@ -659,7 +683,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
/*
* IPVS rate estimator prototypes (from ip_vs_est.c)
*/
extern int ip_vs_new_estimator(struct ip_vs_stats *stats);
extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
......
......@@ -89,7 +89,10 @@ extern void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
struct netdev_queue *txq = q->dev_queue;
if (!netif_tx_queue_stopped(txq) &&
!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
__qdisc_run(q);
}
......
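The qdisc_run() change above adds a cheap netif_tx_queue_stopped() check in front of the existing RUNNING test-and-set, so no work is scheduled for a queue the driver has already stopped. A stand-alone sketch of the same guard ordering using C11 atomics (all names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_txq {
	bool stopped;		/* driver has stopped this TX queue */
	atomic_flag running;	/* only one context may run the qdisc at a time */
};

static void run_qdisc(struct fake_txq *q)
{
	/* Cheap check first: nothing to do while the queue is stopped. */
	if (!q->stopped && !atomic_flag_test_and_set(&q->running)) {
		printf("dequeueing packets\n");
		atomic_flag_clear(&q->running);
	}
}

int main(void)
{
	struct fake_txq q = { .stopped = false };

	atomic_flag_clear(&q.running);	/* start in the "not running" state */
	run_qdisc(&q);			/* runs */
	q.stopped = true;
	run_qdisc(&q);			/* skipped: queue is stopped */
	return 0;
}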
......@@ -99,7 +99,7 @@ struct gen_estimator_head
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
/* Protects against NULL dereference */
/* Protects against NULL dereference and RCU write-side */
static DEFINE_RWLOCK(est_lock);
static void est_timer(unsigned long arg)
......@@ -185,6 +185,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
write_lock_bh(&est_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
......@@ -194,6 +195,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
list_add_rcu(&est->list, &elist[idx].list);
write_unlock_bh(&est_lock);
return 0;
}
......@@ -212,7 +214,6 @@ static void __gen_kill_estimator(struct rcu_head *head)
* Removes the rate estimator specified by &bstats and &rate_est
* and deletes the timer.
*
* NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic *bstats,
struct gnet_stats_rate_est *rate_est)
......@@ -226,17 +227,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
if (!elist[idx].timer.function)
continue;
write_lock_bh(&est_lock);
list_for_each_entry_safe(e, n, &elist[idx].list, list) {
if (e->rate_est != rate_est || e->bstats != bstats)
continue;
write_lock_bh(&est_lock);
e->bstats = NULL;
write_unlock_bh(&est_lock);
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
write_unlock_bh(&est_lock);
}
}
......
......@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
*/
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
int ntxq;
if (!pkt_dev->odev) {
printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
"setup_inject.\n");
......@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
return;
}
/* make sure that we don't pick a non-existing transmit queue */
ntxq = pkt_dev->odev->real_num_tx_queues;
if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
"disabled because CPU count (%d) exceeds number ",
num_online_cpus());
printk(KERN_WARNING "pktgen: WARNING: of tx queues "
"(%d) on %s \n", ntxq, pkt_dev->odev->name);
pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
}
if (ntxq <= pkt_dev->queue_map_min) {
printk(KERN_WARNING "pktgen: WARNING: Requested "
"queue_map_min (%d) exceeds number of tx\n",
pkt_dev->queue_map_min);
printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
"%s, resetting\n", ntxq, pkt_dev->odev->name);
pkt_dev->queue_map_min = ntxq - 1;
}
if (ntxq <= pkt_dev->queue_map_max) {
printk(KERN_WARNING "pktgen: WARNING: Requested "
"queue_map_max (%d) exceeds number of tx\n",
pkt_dev->queue_map_max);
printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
"%s, resetting\n", ntxq, pkt_dev->odev->name);
pkt_dev->queue_map_max = ntxq - 1;
}
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
......
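The pktgen hunk clamps queue_map_min / queue_map_max (and the QUEUE_MAP_CPU flag) against the device's real_num_tx_queues before injection starts, so pktgen never picks a non-existent TX queue. The clamping itself, reduced to a stand-alone sketch with invented field names:

#include <stdio.h>

struct fake_pktgen_dev {
	int queue_map_min;
	int queue_map_max;
};

/* Keep the requested queue range inside [0, ntxq - 1]. */
static void clamp_queue_map(struct fake_pktgen_dev *pd, int ntxq)
{
	if (pd->queue_map_min >= ntxq) {
		fprintf(stderr, "queue_map_min %d too large, resetting\n",
			pd->queue_map_min);
		pd->queue_map_min = ntxq - 1;
	}
	if (pd->queue_map_max >= ntxq) {
		fprintf(stderr, "queue_map_max %d too large, resetting\n",
			pd->queue_map_max);
		pd->queue_map_max = ntxq - 1;
	}
}

int main(void)
{
	struct fake_pktgen_dev pd = { .queue_map_min = 2, .queue_map_max = 8 };

	clamp_queue_map(&pd, 4);	/* pretend the device has only 4 TX queues */
	printf("min=%d max=%d\n", pd.queue_map_min, pd.queue_map_max);
	return 0;
}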
......@@ -474,6 +474,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
if (copy_from_user(&opt, optval, sizeof(opt)))
return -EFAULT;
/*
* rfc4340: 6.1. Change Options
*/
if (opt.dccpsf_len < 1)
return -EINVAL;
val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
if (!val)
......
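The DCCP hunk rejects a zero dccpsf_len before allocating a buffer for the change-options list, since RFC 4340 section 6.1 requires at least one value. The validate-before-allocate pattern, as a small stand-alone sketch (function name and bounds are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy a user-supplied value list, refusing empty requests up front. */
static int copy_feature_list(const unsigned char *src, size_t len,
			     unsigned char **out)
{
	if (len < 1)		/* at least one value is required */
		return -EINVAL;

	*out = malloc(len);
	if (!*out)
		return -ENOMEM;
	memcpy(*out, src, len);
	return 0;
}

int main(void)
{
	unsigned char vals[] = { 2, 3 };
	unsigned char *copy;

	printf("empty list -> %d\n", copy_feature_list(vals, 0, &copy));
	if (copy_feature_list(vals, sizeof(vals), &copy) == 0) {
		printf("copied %zu values\n", sizeof(vals));
		free(copy);
	}
	return 0;
}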
......@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct rtable *rt;
struct iphdr *pip;
struct igmpv3_report *pig;
struct net *net = dev_net(dev);
skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
......@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
.nl_u = { .ip4_u = {
.daddr = IGMPV3_ALL_MCR } },
.proto = IPPROTO_IGMP };
if (ip_route_output_key(&init_net, &rt, &fl)) {
if (ip_route_output_key(net, &rt, &fl)) {
kfree_skb(skb);
return NULL;
}
......@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
struct igmphdr *ih;
struct rtable *rt;
struct net_device *dev = in_dev->dev;
struct net *net = dev_net(dev);
__be32 group = pmc ? pmc->multiaddr : 0;
__be32 dst;
......@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
struct flowi fl = { .oif = dev->ifindex,
.nl_u = { .ip4_u = { .daddr = dst } },
.proto = IPPROTO_IGMP };
if (ip_route_output_key(&init_net, &rt, &fl))
if (ip_route_output_key(net, &rt, &fl))
return -1;
}
if (rt->rt_src == 0) {
......@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
if (!net_eq(dev_net(in_dev->dev), &init_net))
return;
for (im=in_dev->mc_list; im; im=im->next) {
if (im->multiaddr == addr) {
im->users++;
......@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
if (!net_eq(dev_net(in_dev->dev), &init_net))
return;
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
if (i->multiaddr==addr) {
if (--i->users == 0) {
......@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)
ASSERT_RTNL();
if (!net_eq(dev_net(in_dev->dev), &init_net))
return;
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_dropped(i);
......@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
{
ASSERT_RTNL();
if (!net_eq(dev_net(in_dev->dev), &init_net))
return;
in_dev->mc_tomb = NULL;
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_gq_running = 0;
......@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)
ASSERT_RTNL();
if (!net_eq(dev_net(in_dev->dev), &init_net))
return;
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for (i=in_dev->mc_list; i; i=i->next)
......@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
ASSERT_RTNL();
if (!net_eq(dev_net(in_dev->dev), &init_net))
return;
/* Deactivate timers */
ip_mc_down(in_dev);
......@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
write_unlock_bh(&in_dev->mc_list_lock);
}
static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = imr->imr_multiaddr.s_addr } } };
......@@ -1404,19 +1388,19 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
struct in_device *idev = NULL;
if (imr->imr_ifindex) {
idev = inetdev_by_index(&init_net, imr->imr_ifindex);
idev = inetdev_by_index(net, imr->imr_ifindex);
if (idev)
__in_dev_put(idev);
return idev;
}
if (imr->imr_address.s_addr) {
dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
dev = ip_dev_find(net, imr->imr_address.s_addr);
if (!dev)
return NULL;
dev_put(dev);
}
if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
if (!dev && !ip_route_output_key(net, &rt, &fl)) {
dev = rt->u.dst.dev;
ip_rt_put(rt);
}
......@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
struct ip_mc_socklist *iml=NULL, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
int ifindex;
int count = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
if (!net_eq(sock_net(sk), &init_net))
return -EPROTONOSUPPORT;
rtnl_lock();
in_dev = ip_mc_find_dev(imr);
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
iml = NULL;
......@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml, **imlp;
struct in_device *in_dev;
struct net *net = sock_net(sk);
__be32 group = imr->imr_multiaddr.s_addr;
u32 ifindex;
int ret = -EADDRNOTAVAIL;
if (!net_eq(sock_net(sk), &init_net))
return -EPROTONOSUPPORT;
rtnl_lock();
in_dev = ip_mc_find_dev(imr);
in_dev = ip_mc_find_dev(net, imr);
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
if (iml->multi.imr_multiaddr.s_addr != group)
......@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
struct in_device *in_dev = NULL;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
int leavegroup = 0;
int i, j, rv;
if (!ipv4_is_multicast(addr))
return -EINVAL;
if (!net_eq(sock_net(sk), &init_net))
return -EPROTONOSUPPORT;
rtnl_lock();
imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
imr.imr_address.s_addr = mreqs->imr_interface;
imr.imr_ifindex = ifindex;
in_dev = ip_mc_find_dev(&imr);
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
......@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *newpsl, *psl;
struct net *net = sock_net(sk);
int leavegroup = 0;
if (!ipv4_is_multicast(addr))
......@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
msf->imsf_fmode != MCAST_EXCLUDE)
return -EINVAL;
if (!net_eq(sock_net(sk), &init_net))
return -EPROTONOSUPPORT;
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = ifindex;
in_dev = ip_mc_find_dev(&imr);
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
......@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
if (!ipv4_is_multicast(addr))
return -EINVAL;
if (!net_eq(sock_net(sk), &init_net))
return -EPROTONOSUPPORT;
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = 0;
in_dev = ip_mc_find_dev(&imr);
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
......@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
if (!ipv4_is_multicast(addr))
return -EINVAL;
if (!net_eq(sock_net(sk), &init_net))
return -EPROTONOSUPPORT;
rtnl_lock();
err = -EADDRNOTAVAIL;
......@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
struct net *net = sock_net(sk);
if (inet->mc_list == NULL)
return;
if (!net_eq(sock_net(sk), &init_net))
return;
rtnl_lock();
while ((iml = inet->mc_list) != NULL) {
struct in_device *in_dev;
inet->mc_list = iml->next;
in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
if (in_dev != NULL) {
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
......
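The igmp.c changes replace every hard-coded &init_net with the namespace derived from the object at hand (dev_net(dev) for device paths, sock_net(sk) for socket paths) and drop the early returns that refused to run outside the initial namespace; this is the "Fix crash by making igmp per namespace" item from the merge summary. A toy user-space analogue of resolving the namespace from the object instead of using a global (all types invented):

#include <stdio.h>

struct net { const char *name; };
struct net_device { const char *name; struct net *nd_net; };

static struct net init_net = { "init_net" };

/* Analogous to dev_net(dev): the namespace this device lives in. */
static struct net *dev_net(const struct net_device *dev)
{
	return dev->nd_net;
}

/* Stand-in for a route lookup: uses the caller's namespace, not a global. */
static void route_output(const struct net *net, const char *daddr)
{
	printf("routing %s in %s\n", daddr, net->name);
}

int main(void)
{
	struct net other = { "netns-1" };
	struct net_device eth0 = { "eth0", &init_net };
	struct net_device veth0 = { "veth0", &other };

	route_output(dev_net(&eth0), "224.0.0.22");
	route_output(dev_net(&veth0), "224.0.0.22");	/* previously forced into init_net */
	return 0;
}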
......@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
}
int ip_vs_app_init(void)
int __init ip_vs_app_init(void)
{
/* we will replace it with proc_net_ipvs_create() soon */
proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
......
......@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
}
int ip_vs_conn_init(void)
int __init ip_vs_conn_init(void)
{
int idx;
......
......@@ -683,9 +683,22 @@ static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
memset(stats, 0, (char *)&stats->lock - (char *)stats);
spin_unlock_bh(&stats->lock);
stats->conns = 0;
stats->inpkts = 0;
stats->outpkts = 0;
stats->inbytes = 0;
stats->outbytes = 0;
stats->cps = 0;
stats->inpps = 0;
stats->outpps = 0;
stats->inbps = 0;
stats->outbps = 0;
ip_vs_zero_estimator(stats);
spin_unlock_bh(&stats->lock);
}
/*
......@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
{ .ctl_name = 0 }
};
struct ctl_path net_vs_ctl_path[] = {
const struct ctl_path net_vs_ctl_path[] = {
{ .procname = "net", .ctl_name = CTL_NET, },
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
{ .procname = "vs", },
......@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {
#endif
struct ip_vs_stats ip_vs_stats;
struct ip_vs_stats ip_vs_stats = {
.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
};
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
......@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
};
int ip_vs_control_init(void)
int __init ip_vs_control_init(void)
{
int ret;
int idx;
......@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
INIT_LIST_HEAD(&ip_vs_rtable[idx]);
}
memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
spin_lock_init(&ip_vs_stats.lock);
ip_vs_new_estimator(&ip_vs_stats);
/* Hook the defense timer */
......
......@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
.init_service = ip_vs_dh_init_svc,
.done_service = ip_vs_dh_done_svc,
.update_service = ip_vs_dh_update_svc,
......@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
static int __init ip_vs_dh_init(void)
{
INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
......
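Across the IPVS schedulers in this merge, the runtime INIT_LIST_HEAD() call in each module's init function is replaced by a static LIST_HEAD_INIT() in the structure initializer, so the n_list head is a valid empty list even before module init runs (which is also what lets the later list_empty() checks in ip_vs_sched.c work). A stand-alone illustration with a minimal circular list head:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Static initializer: the head points at itself, i.e. an empty list. */
#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct scheduler {
	const char *name;
	struct list_head n_list;
};

/* Valid (and empty) before any init function touches it. */
static struct scheduler dh_sched = {
	.name = "dh",
	.n_list = LIST_HEAD_INIT(dh_sched.n_list),
};

int main(void)
{
	printf("already registered? %s\n",
	       list_empty(&dh_sched.n_list) ? "no" : "yes");
	return 0;
}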
......@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <net/ip_vs.h>
......@@ -44,28 +45,11 @@
*/
struct ip_vs_estimator
{
struct ip_vs_estimator *next;
struct ip_vs_stats *stats;
u32 last_conns;
u32 last_inpkts;
u32 last_outpkts;
u64 last_inbytes;
u64 last_outbytes;
u32 cps;
u32 inpps;
u32 outpps;
u32 inbps;
u32 outbps;
};
static void estimation_timer(unsigned long arg);
static struct ip_vs_estimator *est_list = NULL;
static DEFINE_RWLOCK(est_lock);
static struct timer_list est_timer;
static LIST_HEAD(est_list);
static DEFINE_SPINLOCK(est_lock);
static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
static void estimation_timer(unsigned long arg)
{
......@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
u64 n_inbytes, n_outbytes;
u32 rate;
read_lock(&est_lock);
for (e = est_list; e; e = e->next) {
s = e->stats;
spin_lock(&est_lock);
list_for_each_entry(e, &est_list, list) {
s = container_of(e, struct ip_vs_stats, est);
spin_lock(&s->lock);
n_conns = s->conns;
......@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
s->outbps = (e->outbps+0xF)>>5;
spin_unlock(&s->lock);
}
read_unlock(&est_lock);
spin_unlock(&est_lock);
mod_timer(&est_timer, jiffies + 2*HZ);
}
int ip_vs_new_estimator(struct ip_vs_stats *stats)
void ip_vs_new_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est;
struct ip_vs_estimator *est = &stats->est;
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&est->list);
est->stats = stats;
est->last_conns = stats->conns;
est->cps = stats->cps<<10;
......@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
est->last_outbytes = stats->outbytes;
est->outbps = stats->outbps<<5;
write_lock_bh(&est_lock);
est->next = est_list;
if (est->next == NULL) {
setup_timer(&est_timer, estimation_timer, 0);
est_timer.expires = jiffies + 2*HZ;
add_timer(&est_timer);
}
est_list = est;
write_unlock_bh(&est_lock);
return 0;
spin_lock_bh(&est_lock);
if (list_empty(&est_list))
mod_timer(&est_timer, jiffies + 2 * HZ);
list_add(&est->list, &est_list);
spin_unlock_bh(&est_lock);
}
void ip_vs_kill_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est, **pest;
int killed = 0;
write_lock_bh(&est_lock);
pest = &est_list;
while ((est=*pest) != NULL) {
if (est->stats != stats) {
pest = &est->next;
continue;
}
*pest = est->next;
kfree(est);
killed++;
struct ip_vs_estimator *est = &stats->est;
spin_lock_bh(&est_lock);
list_del(&est->list);
while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
spin_unlock_bh(&est_lock);
cpu_relax();
spin_lock_bh(&est_lock);
}
if (killed && est_list == NULL)
del_timer_sync(&est_timer);
write_unlock_bh(&est_lock);
spin_unlock_bh(&est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *e;
write_lock_bh(&est_lock);
for (e = est_list; e; e = e->next) {
if (e->stats != stats)
continue;
/* set counters zero */
e->last_conns = 0;
e->last_inpkts = 0;
e->last_outpkts = 0;
e->last_inbytes = 0;
e->last_outbytes = 0;
e->cps = 0;
e->inpps = 0;
e->outpps = 0;
e->inbps = 0;
e->outbps = 0;
}
write_unlock_bh(&est_lock);
struct ip_vs_estimator *est = &stats->est;
/* set counters zero, caller must hold the stats->lock lock */
est->last_inbytes = 0;
est->last_outbytes = 0;
est->last_conns = 0;
est->last_inpkts = 0;
est->last_outpkts = 0;
est->cps = 0;
est->inpps = 0;
est->outpps = 0;
est->inbps = 0;
est->outbps = 0;
}
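The estimator rewrite drops the kmalloc'ed, singly linked est_list in favour of embedding struct ip_vs_estimator directly inside struct ip_vs_stats and chaining it with the standard list_head API, which is why ip_vs_new_estimator() can stop returning an error. The "embed the node, recover the container" idea in a stand-alone sketch:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Minimal container_of: map a member pointer back to its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct estimator { struct list_head list; unsigned int cps; };
struct stats { unsigned int conns; struct estimator est; };

int main(void)
{
	struct stats s = { .conns = 42 };
	struct list_head *node = &s.est.list;

	/* From the embedded list node back to the estimator, then its owner. */
	struct estimator *e = container_of(node, struct estimator, list);
	struct stats *owner = container_of(e, struct stats, est);

	printf("owner has %u conns\n", owner->conns);
	return 0;
}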
......@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.name = "lblc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
.init_service = ip_vs_lblc_init_svc,
.done_service = ip_vs_lblc_done_svc,
.update_service = ip_vs_lblc_update_svc,
......@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
{
int ret;
INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
......
......@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.name = "lblcr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
.init_service = ip_vs_lblcr_init_svc,
.done_service = ip_vs_lblcr_done_svc,
.update_service = ip_vs_lblcr_update_svc,
......@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
{
int ret;
INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
......
......@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
.name = "lc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
.init_service = ip_vs_lc_init_svc,
.done_service = ip_vs_lc_done_svc,
.update_service = ip_vs_lc_update_svc,
......@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
static int __init ip_vs_lc_init(void)
{
INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
}
......
......@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
.name = "nq",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
.init_service = ip_vs_nq_init_svc,
.done_service = ip_vs_nq_done_svc,
.update_service = ip_vs_nq_update_svc,
......@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
static int __init ip_vs_nq_init(void)
{
INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
}
......
......@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
/*
* register an ipvs protocol
*/
static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
......@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
}
int ip_vs_protocol_init(void)
int __init ip_vs_protocol_init(void)
{
char protocols[64];
#define REGISTER_PROTOCOL(p) \
......
......@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
.name = "rr", /* name */
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
.init_service = ip_vs_rr_init_svc,
.done_service = ip_vs_rr_done_svc,
.update_service = ip_vs_rr_update_svc,
......@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
static int __init ip_vs_rr_init(void)
{
INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
}
......
......@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
write_lock_bh(&__ip_vs_sched_lock);
if (scheduler->n_list.next != &scheduler->n_list) {
if (!list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
ip_vs_use_count_dec();
IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
......@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
}
write_lock_bh(&__ip_vs_sched_lock);
if (scheduler->n_list.next == &scheduler->n_list) {
if (list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
"is not in the list. failed\n", scheduler->name);
......
......@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
.name = "sed",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
.init_service = ip_vs_sed_init_svc,
.done_service = ip_vs_sed_done_svc,
.update_service = ip_vs_sed_update_svc,
......@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
static int __init ip_vs_sed_init(void)
{
INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
}
......
......@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
.name = "sh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
.init_service = ip_vs_sh_init_svc,
.done_service = ip_vs_sh_done_svc,
.update_service = ip_vs_sh_update_svc,
......@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
static int __init ip_vs_sh_init(void)
{
INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
}
......
......@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
* progress of stopping the master sync daemon.
*/
spin_lock(&ip_vs_sync_lock);
spin_lock_bh(&ip_vs_sync_lock);
ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
spin_unlock(&ip_vs_sync_lock);
spin_unlock_bh(&ip_vs_sync_lock);
kthread_stop(sync_master_thread);
sync_master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
......
......@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
.name = "wlc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
.init_service = ip_vs_wlc_init_svc,
.done_service = ip_vs_wlc_done_svc,
.update_service = ip_vs_wlc_update_svc,
......@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
static int __init ip_vs_wlc_init(void)
{
INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
}
......
......@@ -1277,6 +1277,7 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
r->ifi_flags = dev_get_flags(dev);
r->ifi_change = 0; /* Wireless changes don't affect those flags */
NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
/* Add the wireless events in the netlink packet */
NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
......