Commit 7a49efae authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
  netns: Fix crash by making igmp per namespace
  bnx2x: Version update
  bnx2x: Checkpatch compliance
  bnx2x: Spelling mistakes
  bnx2x: Minor code improvements
  bnx2x: Driver info
  bnx2x: 1G LED does not turn off
  bnx2x: 8073 PHY changes
  bnx2x: Change GPIO for any port
  bnx2x: Pause settings
  bnx2x: Link order with external PHY
  bnx2x: No LRO without Rx checksum
  bnx2x: Wrong structure size
  bnx2x: WoL capability
  bnx2x: Clearing MAC addresses filters
  bnx2x: Delay in while loops
  bnx2x: PBA Table Page Alignment Workaround
  bnx2x: Self-test false positive
  bnx2x: Memory allocation
  bnx2x: HW attention lock
  ...
parents 0ff82850 877acedc
@@ -3076,6 +3076,7 @@ M: horms@verge.net.au
 P:	Julian Anastasov
 M:	ja@ssi.bg
 L:	netdev@vger.kernel.org
+L:	lvs-devel@vger.kernel.org
 S:	Maintained

 NFS, SUNRPC, AND LOCKD CLIENTS
...@@ -40,20 +40,20 @@ ...@@ -40,20 +40,20 @@
#define DP(__mask, __fmt, __args...) do { \ #define DP(__mask, __fmt, __args...) do { \
if (bp->msglevel & (__mask)) \ if (bp->msglevel & (__mask)) \
printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
bp->dev?(bp->dev->name):"?", ##__args); \ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0) } while (0)
/* errors debug print */ /* errors debug print */
#define BNX2X_DBG_ERR(__fmt, __args...) do { \ #define BNX2X_DBG_ERR(__fmt, __args...) do { \
if (bp->msglevel & NETIF_MSG_PROBE) \ if (bp->msglevel & NETIF_MSG_PROBE) \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
bp->dev?(bp->dev->name):"?", ##__args); \ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0) } while (0)
/* for errors (never masked) */ /* for errors (never masked) */
#define BNX2X_ERR(__fmt, __args...) do { \ #define BNX2X_ERR(__fmt, __args...) do { \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
bp->dev?(bp->dev->name):"?", ##__args); \ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0) } while (0)
/* before we have a dev->name use dev_info() */ /* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
 #define SHMEM_RD(bp, field)		REG_RD(bp, SHMEM_ADDR(bp, field))
 #define SHMEM_WR(bp, field, val)	REG_WR(bp, SHMEM_ADDR(bp, field), val)

-#define NIG_WR(reg, val)	REG_WR(bp, reg, val)
-#define EMAC_WR(reg, val)	REG_WR(bp, emac_base + reg, val)
-#define BMAC_WR(reg, val)	REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
-
-#define for_each_queue(bp, var)	for (var = 0; var < bp->num_queues; var++)
-
-#define for_each_nondefault_queue(bp, var) \
-				for (var = 1; var < bp->num_queues; var++)
-#define is_multi(bp)		(bp->num_queues > 1)
+#define EMAC_RD(bp, reg)		REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val)		REG_WR(bp, emac_base + reg, val)

 /* fast path */
@@ -163,7 +155,7 @@ struct sw_rx_page {
 #define NUM_RX_SGE_PAGES		2
 #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
 #define MAX_RX_SGE_CNT			(RX_SGE_CNT - 2)
-/* RX_SGE_CNT is promissed to be a power of 2 */
+/* RX_SGE_CNT is promised to be a power of 2 */
 #define RX_SGE_MASK			(RX_SGE_CNT - 1)
 #define NUM_RX_SGE			(RX_SGE_CNT * NUM_RX_SGE_PAGES)
 #define MAX_RX_SGE			(NUM_RX_SGE - 1)
@@ -258,8 +250,7 @@ struct bnx2x_fastpath {
	unsigned long		tx_pkt,
				rx_pkt,
-				rx_calls,
-				rx_alloc_failed;
+				rx_calls;

	/* TPA related */
	struct sw_rx_bd		tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
	u8			tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +266,15 @@ struct bnx2x_fastpath {
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)

+#define BNX2X_HAS_TX_WORK(fp) \
+			((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
+			 (fp->tx_pkt_prod != fp->tx_pkt_cons))
+
+#define BNX2X_HAS_RX_WORK(fp) \
+			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+
+#define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
+
 /* MC hsi */
 #define MAX_FETCH_BD			13	/* HW max BDs per packet */
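The BNX2X_HAS_WORK tests above compare free-running 16-bit producer and consumer indices. A minimal userspace sketch of the same idea, with illustrative names (struct ring and ring_has_work are not part of the driver):

#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint16_t prod;	/* advanced by the producer */
	uint16_t cons;	/* advanced by the consumer */
};

/* Inequality, not subtraction, detects pending work; it stays
 * correct even when the 16-bit indices wrap around. */
static bool ring_has_work(const struct ring *r)
{
	return r->prod != r->cons;
}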
@@ -317,7 +317,7 @@ struct bnx2x_fastpath {
 #define RCQ_BD(x)			((x) & MAX_RCQ_BD)

-/* This is needed for determening of last_max */
+/* This is needed for determining of last_max */
 #define SUB_S16(a, b)			(s16)((s16)(a) - (s16)(b))

 #define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +386,28 @@ struct bnx2x_fastpath {
 #define TPA_TYPE(cqe_fp_flags)		((cqe_fp_flags) & \
					 (TPA_TYPE_START | TPA_TYPE_END))
-#define BNX2X_RX_SUM_OK(cqe) \
-			(!(cqe->fast_path_cqe.status_flags & \
-			  (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
-			   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
+#define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+			(!((cqe)->fast_path_cqe.status_flags & \
+			   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+			 ((cqe)->fast_path_cqe.type_error_flags & \
+			  ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+			(!((cqe)->fast_path_cqe.status_flags & \
+			   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+			 ((cqe)->fast_path_cqe.type_error_flags & \
+			  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+			(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))

 #define BNX2X_RX_SUM_FIX(cqe) \
	((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
	  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
	 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))

-#define ETH_RX_ERROR_FALGS	(ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
-				 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
-				 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)

 #define FP_USB_FUNC_OFF			(2 + 2*HC_USTORM_SB_NUM_INDICES)
 #define FP_CSB_FUNC_OFF			(2 + 2*HC_CSTORM_SB_NUM_INDICES)
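The new BNX2X_IP_CSUM_ERR/BNX2X_L4_CSUM_ERR macros count a checksum as bad only when the hardware both validated it and flagged it; a frame whose checksum the NIC skipped is neither good nor bad. A hedged userspace sketch of that two-flag test (flag values are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#define NO_VALIDATION	0x1	/* HW did not check this checksum */
#define BAD_XSUM	0x2	/* HW checked it and it failed */

static bool csum_err(uint32_t status_flags, uint32_t error_flags)
{
	/* An error requires: validation happened AND the sum was bad. */
	return !(status_flags & NO_VALIDATION) && (error_flags & BAD_XSUM);
}

static bool csum_ok(uint32_t status_flags, uint32_t error_flags)
{
	return !csum_err(status_flags, error_flags);
}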
@@ -647,6 +655,8 @@ struct bnx2x_eth_stats {
	u32 brb_drop_hi;
	u32 brb_drop_lo;
+	u32 brb_truncate_hi;
+	u32 brb_truncate_lo;

	u32 jabber_packets_received;
@@ -663,6 +673,9 @@ struct bnx2x_eth_stats {
	u32 mac_discard;

	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
 };

 #define STATS_OFFSET32(stat_name) \
@@ -753,7 +766,6 @@ struct bnx2x {
	u16			def_att_idx;
	u32			attn_state;
	struct attn_route	attn_group[MAX_DYNAMIC_ATTN_GRPS];
-	u32			aeu_mask;
	u32			nig_mask;

	/* slow path ring */
@@ -772,7 +784,7 @@ struct bnx2x {
	u8			stats_pending;
	u8			set_mac_pending;

-	/* End of fileds used in the performance code paths */
+	/* End of fields used in the performance code paths */

	int			panic;
	int			msglevel;
@@ -794,9 +806,6 @@ struct bnx2x {
 #define BP_FUNC(bp)			(bp->func)
 #define BP_E1HVN(bp)			(bp->func >> 1)
 #define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
-/* assorted E1HVN */
-#define IS_E1HMF(bp)			(bp->e1hmf != 0)
-#define BP_MAX_QUEUES(bp)		(IS_E1HMF(bp) ? 4 : 16)

	int			pm_cap;
	int			pcie_cap;
@@ -821,6 +830,7 @@ struct bnx2x {
	u32			mf_config;
	u16			e1hov;
	u8			e1hmf;
+#define IS_E1HMF(bp)		(bp->e1hmf != 0)

	u8			wol;
@@ -836,7 +846,6 @@ struct bnx2x {
	u16			rx_ticks_int;
	u16			rx_ticks;

-	u32			stats_ticks;
	u32			lin_cnt;

	int			state;
@@ -852,6 +861,7 @@ struct bnx2x {
 #define BNX2X_STATE_ERROR		0xf000

	int			num_queues;
+#define BP_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? 4 : 16)

	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -902,10 +912,17 @@ struct bnx2x {
 };

+#define for_each_queue(bp, var)	for (var = 0; var < bp->num_queues; var++)
+
+#define for_each_nondefault_queue(bp, var) \
+				for (var = 1; var < bp->num_queues; var++)
+#define is_multi(bp)		(bp->num_queues > 1)
+
+
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);

 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
			   int wait)
@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define PCICFG_LINK_SPEED_SHIFT		16

-#define BNX2X_NUM_STATS			39
+#define BNX2X_NUM_STATS			42
 #define BNX2X_NUM_TESTS			8

 #define BNX2X_MAC_LOOPBACK		0
@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 /* resolution of the rate shaping timer - 100 usec */
 #define RS_PERIODIC_TIMEOUT_USEC	100
 /* resolution of fairness algorithm in usecs -
-   coefficient for clauclating the actuall t fair */
+   coefficient for calculating the actual t fair */
 #define T_FAIR_COEF			10000000
 /* number of bytes in single QM arbitration cycle -
-   coeffiecnt for calculating the fairness timer */
+   coefficient for calculating the fairness timer */
 #define QM_ARB_BYTES			40000
 #define FAIR_MEM			2
@@ -1268,7 +1268,7 @@ struct doorbell {

 /*
- * IGU driver acknowlegement register
+ * IGU driver acknowledgement register
 */
 struct igu_ack_register {
 #if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@ struct timers_block_context {
 };

 /*
- * structure for easy accessability to assembler
+ * structure for easy accessibility to assembler
 */
 struct eth_tx_bd_flags {
	u8 as_bitfield;
@@ -2044,7 +2044,7 @@ struct eth_context {

 /*
- * ethernet doorbell
+ * Ethernet doorbell
 */
 struct eth_tx_doorbell {
 #if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@ struct ramrod_data {
 };

 /*
- * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
 */
 union eth_ramrod_data {
	struct ramrod_data general;
@@ -2330,7 +2330,7 @@ struct spe_hdr {
 };

 /*
- * ethernet slow path element
+ * Ethernet slow path element
 */
 union eth_specific_data {
	u8 protocol_data[8];
@@ -2343,7 +2343,7 @@ union eth_specific_data {
 };

 /*
- * ethernet slow path element
+ * Ethernet slow path element
 */
 struct eth_spe {
	struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {

 /*
- * common flag to indicate existance of TPA.
+ * common flag to indicate existence of TPA.
 */
 struct tstorm_eth_tpa_exist {
 #if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
 };

 /*
- * Eth statistics query sturcture for the eth_stats_quesry ramrod
+ * Eth statistics query structure for the eth_stats_query ramrod
 */
 struct eth_stats_query {
	struct xstorm_common_stats xstorm_common;
@@ -72,26 +72,26 @@
 struct raw_op {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
	u32 raw_data;
 };

 struct op_read {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
	u32 pad;
 };

 struct op_write {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
	u32 val;
 };

 struct op_string_write {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
 #ifdef __LITTLE_ENDIAN
	u16 data_off;
	u16 data_len;
@@ -102,8 +102,8 @@ struct op_string_write {
 };

 struct op_zero {
-	u32 op		:8;
-	u32 offset	:24;
+	u32 op:8;
+	u32 offset:24;
	u32 len;
 };
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
 /*********************************************************
    There are different blobs for each PRAM section.
    In addition, each blob write operation is divided into a few operations
-   in order to decrease the amount of phys. contigious buffer needed.
+   in order to decrease the amount of phys. contiguous buffer needed.
    Thus, when we select a blob the address may be with some offset
    from the beginning of PRAM section.
    The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
			len = op->str_wr.data_len;
			data = data_base + op->str_wr.data_off;

-			/* carefull! it must be in order */
+			/* careful! it must be in order */
			if (unlikely(op_type > OP_WB)) {

				/* If E1 only */
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
	return crc_res;
 }

-/* regiesers addresses are not in order
+/* registers addresses are not in order
    so these arrays help simplify the code */
 static const int cm_start[E1H_FUNC_MAX][9] = {
	{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
@@ -55,14 +55,17 @@ struct link_params {
 #define LOOPBACK_BMAC		2
 #define LOOPBACK_XGXS_10	3
 #define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT		5

	u16 req_duplex;
	u16 req_flow_ctrl;
+	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+				req_flow_ctrl is set to AUTO */
	u16 req_line_speed; /* Also determine AutoNeg */

	/* Device parameters */
	u8 mac_addr[6];
+	u16 mtu;

	/* shmem parameters */
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
		    u8 phy_addr, u8 devad, u16 reg, u16 val);

 /* Reads the link_status from the shmem,
-   and update the link vars accordinaly */
+   and update the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
			      struct link_vars *output);
 /* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 /* Set/Unset the led
    Basically, the CLC takes care of the led for the link, but in case one needs
-   to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to
+   to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
 u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
		 u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
    otherwise link is down*/
 u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);

+/* One-time initialization for external phy after power up */
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);

 #endif /* BNX2X_LINK_H */
This diff is collapsed.
This diff is collapsed.
@@ -901,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
	if (len > skb_headlen(skb) &&
-	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
@@ -918,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
		return 1;
	if (unlikely(len > skb->len))
		return 0;
-	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }

 /**
@@ -1321,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
-	return skb_pad(skb, len-size);
+	return skb_pad(skb, len - size);
 }

 static inline int skb_add_data(struct sk_buff *skb,
@@ -38,11 +38,6 @@ struct route_info {
 #define RT6_LOOKUP_F_SRCPREF_COA	0x00000020

-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern struct rt6_info	*ip6_prohibit_entry;
-extern struct rt6_info	*ip6_blk_hole_entry;
-#endif
-
 extern void			ip6_route_input(struct sk_buff *skb);

 extern struct dst_entry *	ip6_route_output(struct net *net,
@@ -118,7 +113,6 @@ extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 extern void			rt6_ifdown(struct net *net, struct net_device *dev);
 extern void			rt6_mtu_change(struct net_device *dev, unsigned mtu);

-extern rwlock_t rt6_lock;

 /*
  *	Store a destination cache entry in a socket
@@ -140,8 +140,24 @@ struct ip_vs_seq {

 /*
- *	IPVS statistics object
+ *	IPVS statistics objects
 */
+struct ip_vs_estimator {
+	struct list_head	list;
+
+	u64			last_inbytes;
+	u64			last_outbytes;
+	u32			last_conns;
+	u32			last_inpkts;
+	u32			last_outpkts;
+
+	u32			cps;
+	u32			inpps;
+	u32			outpps;
+	u32			inbps;
+	u32			outbps;
+};
+
 struct ip_vs_stats
 {
	__u32                   conns;          /* connections scheduled */
@@ -156,7 +172,15 @@ struct ip_vs_stats
	__u32			inbps;		/* current in byte rate */
	__u32			outbps;		/* current out byte rate */

+	/*
+	 * Don't add anything before the lock, because we use memcpy() to copy
+	 * the members before the lock to struct ip_vs_stats_user in
+	 * ip_vs_ctl.c.
+	 */
+
	spinlock_t              lock;           /* spin lock */
+
+	struct ip_vs_estimator	est;		/* estimator */
 };

 struct dst_entry;
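struct ip_vs_estimator is now embedded inside struct ip_vs_stats rather than allocated separately, so the estimator code can recover the owning stats from a list node with container_of (as the ip_vs_est.c hunk below does). A self-contained userspace sketch of that pattern, where struct est and struct stats are stand-ins, not the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct est { long cps; };
struct stats { long conns; struct est est; };

int main(void)
{
	struct stats s = { .conns = 7 };
	struct est *e = &s.est;

	/* Walk back from the embedded member to its container. */
	struct stats *owner = container_of(e, struct stats, est);
	printf("conns = %ld\n", owner->conns);	/* prints 7 */
	return 0;
}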
@@ -440,7 +464,7 @@ struct ip_vs_app
 */
 extern const char *ip_vs_proto_name(unsigned proto);
 extern void ip_vs_init_hash_table(struct list_head *table, int rows);
-#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0]))
+#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))

 #define IP_VS_APP_TYPE_FTP	1
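ARRAY_SIZE computes the same element count as the open-coded sizeof division it replaces, but the kernel's version additionally fails to compile when handed a pointer instead of a real array. A minimal userspace equivalent (without that extra type check):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	struct list_head { void *next, *prev; } table[256];

	printf("%zu rows\n", ARRAY_SIZE(table));	/* prints "256 rows" */
	return 0;
}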
@@ -620,7 +644,7 @@ extern int sysctl_ip_vs_expire_quiescent_template;
 extern int sysctl_ip_vs_sync_threshold[2];
 extern int sysctl_ip_vs_nat_icmp_send;
 extern struct ip_vs_stats ip_vs_stats;
-extern struct ctl_path net_vs_ctl_path[];
+extern const struct ctl_path net_vs_ctl_path[];

 extern struct ip_vs_service *
 ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport);
@@ -659,7 +683,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
 */
-extern int ip_vs_new_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
@@ -89,7 +89,10 @@ extern void __qdisc_run(struct Qdisc *q);

 static inline void qdisc_run(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+	struct netdev_queue *txq = q->dev_queue;
+
+	if (!netif_tx_queue_stopped(txq) &&
+	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
		__qdisc_run(q);
 }
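The qdisc_run change adds a cheap precondition ahead of the atomic test-and-set: if the hardware queue is stopped, there is no point in claiming the __QDISC_STATE_RUNNING bit at all. A hedged userspace sketch of the same guard shape (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag running = ATOMIC_FLAG_INIT;

static void maybe_run(bool queue_stopped)
{
	/* Cheap check first; only one caller wins the flag and runs. */
	if (!queue_stopped && !atomic_flag_test_and_set(&running)) {
		/* ... drain the queue ... */
		atomic_flag_clear(&running);
	}
}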
@@ -99,7 +99,7 @@ struct gen_estimator_head

 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

-/* Protects against NULL dereference */
+/* Protects against NULL dereference and RCU write-side */
 static DEFINE_RWLOCK(est_lock);

 static void est_timer(unsigned long arg)
@@ -185,6 +185,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
	est->last_packets = bstats->packets;
	est->avpps = rate_est->pps<<10;

+	write_lock_bh(&est_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -194,6 +195,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
+	write_unlock_bh(&est_lock);
	return 0;
 }
@@ -212,7 +214,6 @@ static void __gen_kill_estimator(struct rcu_head *head)
 * Removes the rate estimator specified by &bstats and &rate_est
 * and deletes the timer.
 *
- * NOTE: Called under rtnl_mutex
 */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
	struct gnet_stats_rate_est *rate_est)
@@ -226,17 +227,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
		if (!elist[idx].timer.function)
			continue;

+		write_lock_bh(&est_lock);
		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
			if (e->rate_est != rate_est || e->bstats != bstats)
				continue;

-			write_lock_bh(&est_lock);
			e->bstats = NULL;
-			write_unlock_bh(&est_lock);

			list_del_rcu(&e->list);
			call_rcu(&e->e_rcu, __gen_kill_estimator);
		}
+		write_unlock_bh(&est_lock);
	}
 }
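The gen_estimator fix widens est_lock so a writer holds it across the whole list walk and unlink, not just the single e->bstats store; otherwise two concurrent writers could unlink entries from under each other. A simplified pthread sketch of the corrected lock scope (the singly linked list and names are illustrative, not the kernel code):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; int dead; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void kill_matching(int (*match)(struct node *))
{
	pthread_mutex_lock(&lock);	/* covers traversal AND unlink */
	for (struct node **pp = &head; *pp; ) {
		if (match(*pp)) {
			struct node *victim = *pp;
			*pp = victim->next;	/* unlink under the lock */
			victim->dead = 1;
		} else {
			pp = &(*pp)->next;
		}
	}
	pthread_mutex_unlock(&lock);
}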
@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
 */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
+	int ntxq;
+
	if (!pkt_dev->odev) {
		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
		       "setup_inject.\n");
@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
		return;
	}

+	/* make sure that we don't pick a non-existing transmit queue */
+	ntxq = pkt_dev->odev->real_num_tx_queues;
+	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+		       "disabled because CPU count (%d) exceeds number ",
+		       num_online_cpus());
+		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+	}
+	if (ntxq <= pkt_dev->queue_map_min) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_min (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_min);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_min = ntxq - 1;
+	}
+	if (ntxq <= pkt_dev->queue_map_max) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_max (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_max);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_max = ntxq - 1;
+	}
+
	/* Default to the interface's mac if not explicitly set. */

	if (is_zero_ether_addr(pkt_dev->src_mac))
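The pktgen checks clamp the requested queue range to the device's real transmit queue count: an out-of-range queue_map_min/queue_map_max is pinned to the last valid index, ntxq - 1. The clamp reduces to this one-liner (an illustrative helper, not in the kernel source):

static int clamp_queue(int requested, int ntxq)
{
	/* valid queue indices are 0 .. ntxq - 1 */
	return (requested >= ntxq) ? ntxq - 1 : requested;
}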
@@ -474,6 +474,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
	if (copy_from_user(&opt, optval, sizeof(opt)))
		return -EFAULT;

+	/*
+	 * rfc4340: 6.1. Change Options
+	 */
+	if (opt.dccpsf_len < 1)
+		return -EINVAL;
+
	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
	if (!val)
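The DCCP fix validates dccpsf_len before it ever reaches kmalloc(): per RFC 4340 §6.1 a Change option carries at least one feature byte, and allocating from an unchecked user-supplied length is a classic bug shape. A userspace sketch of the validate-before-allocate pattern (copy_option is a hypothetical stand-in, not the kernel function):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int copy_option(const char *src, size_t len, char **out)
{
	char *val;

	if (len < 1)		/* reject before touching the allocator */
		return -EINVAL;
	val = malloc(len);
	if (!val)
		return -ENOMEM;
	memcpy(val, src, len);
	*out = val;
	return 0;
}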
@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
+	struct net *net = dev_net(dev);

	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
				    .nl_u = { .ip4_u = {
				    .daddr = IGMPV3_ALL_MCR } },
				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl)) {
+		if (ip_route_output_key(net, &rt, &fl)) {
			kfree_skb(skb);
			return NULL;
		}
@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
+	struct net *net = dev_net(dev);
	__be32	group = pmc ? pmc->multiaddr : 0;
	__be32	dst;

@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
		struct flowi fl = { .oif = dev->ifindex,
				    .nl_u = { .ip4_u = { .daddr = dst } },
				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl))
+		if (ip_route_output_key(net, &rt, &fl))
			return -1;
	}
	if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)

	ASSERT_RTNL();

-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
	for (im=in_dev->mc_list; im; im=im->next) {
		if (im->multiaddr == addr) {
			im->users++;
@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)

	ASSERT_RTNL();

-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
		if (i->multiaddr==addr) {
			if (--i->users == 0) {
@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)

	ASSERT_RTNL();

-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
	for (i=in_dev->mc_list; i; i=i->next)
		igmp_group_dropped(i);
@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
 {
	ASSERT_RTNL();

-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
	in_dev->mc_tomb = NULL;
 #ifdef CONFIG_IP_MULTICAST
	in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)

	ASSERT_RTNL();

-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)

	ASSERT_RTNL();

-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
	/* Deactivate timers */
	ip_mc_down(in_dev);
@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
	write_unlock_bh(&in_dev->mc_list_lock);
 }

-static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
+static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
	struct in_device *idev = NULL;

	if (imr->imr_ifindex) {
-		idev = inetdev_by_index(&init_net, imr->imr_ifindex);
+		idev = inetdev_by_index(net, imr->imr_ifindex);
		if (idev)
			__in_dev_put(idev);
		return idev;
	}
	if (imr->imr_address.s_addr) {
-		dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
+		dev = ip_dev_find(net, imr->imr_address.s_addr);
		if (!dev)
			return NULL;
		dev_put(dev);
	}

-	if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
+	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
		dev = rt->u.dst.dev;
		ip_rt_put(rt);
	}
@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
	struct ip_mc_socklist *iml=NULL, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
	rtnl_lock();

-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		iml = NULL;
@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml, **imlp;
	struct in_device *in_dev;
+	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
	rtnl_lock();
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
		if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
	rtnl_lock();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
+	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
	rtnl_lock();

	err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
 {
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
+	struct net *net = sock_net(sk);

	if (inet->mc_list == NULL)
		return;

-	if (!net_eq(sock_net(sk), &init_net))
-		return;
-
	rtnl_lock();
	while ((iml = inet->mc_list) != NULL) {
		struct in_device *in_dev;
		inet->mc_list = iml->next;

-		in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
+		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev != NULL) {
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
 }

-int ip_vs_app_init(void)
+int __init ip_vs_app_init(void)
 {
	/* we will replace it with proc_net_ipvs_create() soon */
	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
 }

-int ip_vs_conn_init(void)
+int __init ip_vs_conn_init(void)
 {
	int idx;
@@ -683,9 +683,22 @@ static void
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
	spin_lock_bh(&stats->lock);
-	memset(stats, 0, (char *)&stats->lock - (char *)stats);
-	spin_unlock_bh(&stats->lock);
+
+	stats->conns = 0;
+	stats->inpkts = 0;
+	stats->outpkts = 0;
+	stats->inbytes = 0;
+	stats->outbytes = 0;
+
+	stats->cps = 0;
+	stats->inpps = 0;
+	stats->outpps = 0;
+	stats->inbps = 0;
+	stats->outbps = 0;
+
	ip_vs_zero_estimator(stats);
+
+	spin_unlock_bh(&stats->lock);
 }

 /*
@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
	{ .ctl_name = 0 }
 };

-struct ctl_path net_vs_ctl_path[] = {
+const struct ctl_path net_vs_ctl_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ .procname = "vs", },
@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {

 #endif

-struct ip_vs_stats ip_vs_stats;
+struct ip_vs_stats ip_vs_stats = {
+	.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
+};

 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
 };

-int ip_vs_control_init(void)
+int __init ip_vs_control_init(void)
 {
	int ret;
	int idx;
@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
		INIT_LIST_HEAD(&ip_vs_rtable[idx]);
	}

-	memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	spin_lock_init(&ip_vs_stats.lock);
	ip_vs_new_estimator(&ip_vs_stats);

	/* Hook the defense timer */
@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
	.name =			"dh",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
	.init_service =		ip_vs_dh_init_svc,
	.done_service =		ip_vs_dh_done_svc,
	.update_service =	ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =

 static int __init ip_vs_dh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
 }
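This and the matching scheduler hunks below move the n_list setup into the static initializer, so the list node is self-pointing from the moment the module is loaded and list_empty() gives a meaningful "registered or not" answer even before any init code runs (which the register/unregister checks elsewhere in this merge rely on). A userspace sketch of that static-init idiom, with a minimal list_head stand-in:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static struct list_head sched_node = LIST_HEAD_INIT(sched_node);

static int list_empty(const struct list_head *h)
{
	return h->next == h;	/* self-pointing == not on any list */
}

int main(void)
{
	printf("registered: %s\n", list_empty(&sched_node) ? "no" : "yes");
	return 0;
}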
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
+#include <linux/list.h>

 #include <net/ip_vs.h>
@@ -44,28 +45,11 @@
 */

-struct ip_vs_estimator
-{
-	struct ip_vs_estimator	*next;
-	struct ip_vs_stats	*stats;
-
-	u32			last_conns;
-	u32			last_inpkts;
-	u32			last_outpkts;
-	u64			last_inbytes;
-	u64			last_outbytes;
-
-	u32			cps;
-	u32			inpps;
-	u32			outpps;
-	u32			inbps;
-	u32			outbps;
-};
-
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static void estimation_timer(unsigned long arg);
+
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);

 static void estimation_timer(unsigned long arg)
 {
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
	u64 n_inbytes, n_outbytes;
	u32 rate;

-	read_lock(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		s = e->stats;
+	spin_lock(&est_lock);
+	list_for_each_entry(e, &est_list, list) {
+		s = container_of(e, struct ip_vs_stats, est);

		spin_lock(&s->lock);
		n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
		s->outbps = (e->outbps+0xF)>>5;
		spin_unlock(&s->lock);
	}
-	read_unlock(&est_lock);
+	spin_unlock(&est_lock);
	mod_timer(&est_timer, jiffies + 2*HZ);
 }
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est;
+	struct ip_vs_estimator *est = &stats->est;

-	est = kzalloc(sizeof(*est), GFP_KERNEL);
-	if (est == NULL)
-		return -ENOMEM;
+	INIT_LIST_HEAD(&est->list);

-	est->stats = stats;
	est->last_conns = stats->conns;
	est->cps = stats->cps<<10;
@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
	est->last_outbytes = stats->outbytes;
	est->outbps = stats->outbps<<5;

-	write_lock_bh(&est_lock);
-	est->next = est_list;
-	if (est->next == NULL) {
-		setup_timer(&est_timer, estimation_timer, 0);
-		est_timer.expires = jiffies + 2*HZ;
-		add_timer(&est_timer);
-	}
-	est_list = est;
-	write_unlock_bh(&est_lock);
-	return 0;
+	spin_lock_bh(&est_lock);
+	if (list_empty(&est_list))
+		mod_timer(&est_timer, jiffies + 2 * HZ);
+	list_add(&est->list, &est_list);
+	spin_unlock_bh(&est_lock);
 }
 void ip_vs_kill_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est, **pest;
-	int killed = 0;
+	struct ip_vs_estimator *est = &stats->est;

-	write_lock_bh(&est_lock);
-	pest = &est_list;
-	while ((est=*pest) != NULL) {
-		if (est->stats != stats) {
-			pest = &est->next;
-			continue;
-		}
-		*pest = est->next;
-		kfree(est);
-		killed++;
+	spin_lock_bh(&est_lock);
+	list_del(&est->list);
+	while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+		spin_unlock_bh(&est_lock);
+		cpu_relax();
+		spin_lock_bh(&est_lock);
	}
-	if (killed && est_list == NULL)
-		del_timer_sync(&est_timer);
-	write_unlock_bh(&est_lock);
+	spin_unlock_bh(&est_lock);
 }
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *e;
+	struct ip_vs_estimator *est = &stats->est;

-	write_lock_bh(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		if (e->stats != stats)
-			continue;
-
-		/* set counters zero */
-		e->last_conns = 0;
-		e->last_inpkts = 0;
-		e->last_outpkts = 0;
-		e->last_inbytes = 0;
-		e->last_outbytes = 0;
-		e->cps = 0;
-		e->inpps = 0;
-		e->outpps = 0;
-		e->inbps = 0;
-		e->outbps = 0;
-	}
-	write_unlock_bh(&est_lock);
+	/* set counters zero, caller must hold the stats->lock lock */
+	est->last_inbytes = 0;
+	est->last_outbytes = 0;
+	est->last_conns = 0;
+	est->last_inpkts = 0;
+	est->last_outpkts = 0;
+	est->cps = 0;
+	est->inpps = 0;
+	est->outpps = 0;
+	est->inbps = 0;
+	est->outbps = 0;
 }
@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
	.name =			"lblc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
	.init_service =		ip_vs_lblc_init_svc,
	.done_service =		ip_vs_lblc_done_svc,
	.update_service =	ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
 {
	int ret;

-	INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
	if (ret)
@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.update_service =	ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
 {
	int ret;

-	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	if (ret)
@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
	.name =			"lc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
	.init_service =		ip_vs_lc_init_svc,
	.done_service =		ip_vs_lc_done_svc,
	.update_service =	ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {

 static int __init ip_vs_lc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
 }
@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
	.name =			"nq",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
	.init_service =		ip_vs_nq_init_svc,
	.done_service =		ip_vs_nq_done_svc,
	.update_service =	ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =

 static int __init ip_vs_nq_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
 }
@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
 /*
 *	register an ipvs protocol
 */
-static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
+static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
 }

-int ip_vs_protocol_init(void)
+int __init ip_vs_protocol_init(void)
 {
	char protocols[64];
 #define REGISTER_PROTOCOL(p)			\
@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
	.name =			"rr",			/* name */
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
	.init_service =		ip_vs_rr_init_svc,
	.done_service =		ip_vs_rr_done_svc,
	.update_service =	ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {

 static int __init ip_vs_rr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
 }
@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)

	write_lock_bh(&__ip_vs_sched_lock);

-	if (scheduler->n_list.next != &scheduler->n_list) {
+	if (!list_empty(&scheduler->n_list)) {
		write_unlock_bh(&__ip_vs_sched_lock);
		ip_vs_use_count_dec();
		IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
	}

	write_lock_bh(&__ip_vs_sched_lock);
-	if (scheduler->n_list.next == &scheduler->n_list) {
+	if (list_empty(&scheduler->n_list)) {
		write_unlock_bh(&__ip_vs_sched_lock);
		IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
			  "is not in the list. failed\n", scheduler->name);
@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
	.name =			"sed",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
	.init_service =		ip_vs_sed_init_svc,
	.done_service =		ip_vs_sed_done_svc,
	.update_service =	ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =

 static int __init ip_vs_sed_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
 }
@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
	.name =			"sh",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
	.init_service =		ip_vs_sh_init_svc,
	.done_service =		ip_vs_sh_done_svc,
	.update_service =	ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =

 static int __init ip_vs_sh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
 }
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
		 * progress of stopping the master sync daemon.
		 */
-		spin_lock(&ip_vs_sync_lock);
+		spin_lock_bh(&ip_vs_sync_lock);
		ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock(&ip_vs_sync_lock);
+		spin_unlock_bh(&ip_vs_sync_lock);
		kthread_stop(sync_master_thread);
		sync_master_thread = NULL;
	} else if (state == IP_VS_STATE_BACKUP) {
@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
	.name =			"wlc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
	.init_service =		ip_vs_wlc_init_svc,
	.done_service =		ip_vs_wlc_done_svc,
	.update_service =	ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =

 static int __init ip_vs_wlc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
 }
@@ -1277,6 +1277,7 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
	r->ifi_flags = dev_get_flags(dev);
	r->ifi_change = 0;	/* Wireless changes don't affect those flags */

+	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	/* Add the wireless events in the netlink packet */
	NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
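NLA_PUT_STRING appends the interface name as one more type-length-value attribute to the netlink message, jumping to the function's nla_put_failure label when the skb has no room. A rough userspace sketch of TLV appending (struct buf and tlv_put are illustrative stand-ins, not the netlink API; endianness is ignored):

#include <stdint.h>
#include <string.h>

struct buf { uint8_t data[256]; size_t used; };

static int tlv_put(struct buf *b, uint16_t type,
		   const void *payload, uint16_t len)
{
	if (b->used + 4 + (size_t)len > sizeof(b->data))
		return -1;	/* caller bails out, as nla_put_failure does */
	memcpy(b->data + b->used, &type, 2);		/* T */
	memcpy(b->data + b->used + 2, &len, 2);		/* L */
	memcpy(b->data + b->used + 4, payload, len);	/* V */
	b->used += 4 + len;
	return 0;
}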