Kirill Smelkov / linux

Commit 258daca2 authored Oct 11, 2011 by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

parents 3ed6f695 077887c3

Showing 4 changed files with 411 additions and 334 deletions (+411 -334)
drivers/net/ethernet/intel/igb/e1000_defines.h   +3   -0
drivers/net/ethernet/intel/igb/igb.h             +36  -17
drivers/net/ethernet/intel/igb/igb_ethtool.c     +6   -8
drivers/net/ethernet/intel/igb/igb_main.c        +366 -309
drivers/net/ethernet/intel/igb/e1000_defines.h  (view file @ 258daca2)

@@ -409,6 +409,9 @@
 #define E1000_ICS_DRSTA     E1000_ICR_DRSTA /* Device Reset Aserted */
 
 /* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+
 /* Transmit Descriptor Control */
 /* Enable the counting of descriptors still to be processed. */
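Aside (not part of the commit itself): the new E1000_EITR_CNT_IGNR bit is consumed further down in this merge, where igb_write_itr() ORs it into the EITR value on 82576-and-newer parts in place of the old 0x8000000 magic constant, so that rewriting the register does not clear the interrupt-throttle counters. A minimal sketch of that usage, with names taken from the diff below:

    /* sketch of how the new define is used by igb_write_itr() later in this diff */
    if (adapter->hw.mac.type == e1000_82575)
        itr_val |= itr_val << 16;           /* 82575 mirrors the ITR value into the high half */
    else
        itr_val |= E1000_EITR_CNT_IGNR;     /* 82576+: don't reset counters on write */
    writel(itr_val, q_vector->itr_register);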
drivers/net/ethernet/intel/igb/igb.h  (view file @ 258daca2)

@@ -42,8 +42,11 @@
 struct igb_adapter;
 
-/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
-#define IGB_START_ITR 648
+/* Interrupt defines */
+#define IGB_START_ITR 648   /* ~6000 ints/sec */
+#define IGB_4K_ITR    980
+#define IGB_20K_ITR   196
+#define IGB_70K_ITR   56
 
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256

@@ -146,6 +149,7 @@ struct igb_tx_buffer {
     struct sk_buff *skb;
     unsigned int bytecount;
     u16 gso_segs;
+    __be16 protocol;
     dma_addr_t dma;
     u32 length;
     u32 tx_flags;

@@ -174,15 +178,24 @@ struct igb_rx_queue_stats {
     u64 alloc_failed;
 };
 
+struct igb_ring_container {
+    struct igb_ring *ring;        /* pointer to linked list of rings */
+    unsigned int total_bytes;     /* total bytes processed this int */
+    unsigned int total_packets;   /* total packets processed this int */
+    u16 work_limit;               /* total work allowed per interrupt */
+    u8 count;                     /* total number of rings in vector */
+    u8 itr;                       /* current ITR setting for ring */
+};
+
 struct igb_q_vector {
     struct igb_adapter *adapter;  /* backlink */
-    struct igb_ring *rx_ring;
-    struct igb_ring *tx_ring;
-    struct napi_struct napi;
-    u32 eims_value;
-    u16 cpu;
-    u16 tx_work_limit;
+    int cpu;                      /* CPU for DCA */
+    u32 eims_value;               /* EIMS mask value */
+
+    struct igb_ring_container rx, tx;
+
+    struct napi_struct napi;
+    int numa_node;
 
     u16 itr_val;
     u8 set_itr;

@@ -212,16 +225,12 @@ struct igb_ring {
     u16 next_to_clean ____cacheline_aligned_in_smp;
     u16 next_to_use;
 
-    unsigned int total_bytes;
-    unsigned int total_packets;
-
     union {
         /* TX */
         struct {
             struct igb_tx_queue_stats tx_stats;
             struct u64_stats_sync tx_syncp;
             struct u64_stats_sync tx_syncp2;
-            bool detect_tx_hung;
         };
         /* RX */
         struct {

@@ -231,12 +240,14 @@ struct igb_ring {
     };
     /* Items past this point are only used during ring alloc / free */
     dma_addr_t dma;     /* phys address of the ring */
+    int numa_node;      /* node to alloc ring memory on */
 };
 
-#define IGB_RING_FLAG_RX_CSUM       0x00000001 /* RX CSUM enabled */
-#define IGB_RING_FLAG_RX_SCTP_CSUM  0x00000002 /* SCTP CSUM offload enabled */
-
-#define IGB_RING_FLAG_TX_CTX_IDX    0x00000001 /* HW requires context index */
+enum e1000_ring_flags_t {
+    IGB_RING_FLAG_RX_SCTP_CSUM,
+    IGB_RING_FLAG_TX_CTX_IDX,
+    IGB_RING_FLAG_TX_DETECT_HANG
+};
 
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

@@ -247,6 +258,13 @@ struct igb_ring {
 #define IGB_TX_CTXTDESC(R, i) \
     (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
 
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+                                      const u32 stat_err_bits)
+{
+    return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
 /* igb_desc_unused - calculate if we have unused descriptors */
 static inline int igb_desc_unused(struct igb_ring *ring)
 {

@@ -340,6 +358,7 @@ struct igb_adapter {
     int vf_rate_link_speed;
     u32 rss_queues;
     u32 wvbr;
+    int node;
 };
 
 #define IGB_FLAG_HAS_MSI (1 << 0)
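Aside (not part of the commit itself): the new igb_test_staterr() helper lets callers test descriptor status bits without first byte-swapping the whole status_error word; the ethtool loopback-cleanup change below switches its DD-bit polling loop over to it. Roughly, the pattern used there is:

    /* sketch of the Rx "descriptor done" poll as used in igb_clean_test_rings() below */
    rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
    while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
        /* ... reclaim the completed rx/tx buffers ... */
        rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);   /* fetch next descriptor */
    }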
drivers/net/ethernet/intel/igb/igb_ethtool.c  (view file @ 258daca2)

@@ -1577,16 +1577,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
     union e1000_adv_rx_desc *rx_desc;
     struct igb_rx_buffer *rx_buffer_info;
     struct igb_tx_buffer *tx_buffer_info;
-    int rx_ntc, tx_ntc, count = 0;
-    u32 staterr;
+    u16 rx_ntc, tx_ntc, count = 0;
 
     /* initialize next to clean and descriptor values */
     rx_ntc = rx_ring->next_to_clean;
     tx_ntc = tx_ring->next_to_clean;
     rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-    while (staterr & E1000_RXD_STAT_DD) {
+    while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
         /* check rx buffer */
         rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

@@ -1615,7 +1613,6 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
         /* fetch next descriptor */
         rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
     }
 
     /* re-map buffers to ring, store next to clean values */

@@ -1630,7 +1627,8 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
     struct igb_ring *tx_ring = &adapter->test_tx_ring;
     struct igb_ring *rx_ring = &adapter->test_rx_ring;
-    int i, j, lc, good_cnt, ret_val = 0;
+    u16 i, j, lc, good_cnt;
+    int ret_val = 0;
     unsigned int size = IGB_RX_HDR_LEN;
     netdev_tx_t tx_ret_val;
     struct sk_buff *skb;

@@ -2008,8 +2006,8 @@ static int igb_set_coalesce(struct net_device *netdev,
     for (i = 0; i < adapter->num_q_vectors; i++) {
         struct igb_q_vector *q_vector = adapter->q_vector[i];
-        q_vector->tx_work_limit = adapter->tx_work_limit;
-        if (q_vector->rx_ring)
+        q_vector->tx.work_limit = adapter->tx_work_limit;
+        if (q_vector->rx.ring)
             q_vector->itr_val = adapter->rx_itr_setting;
         else
             q_vector->itr_val = adapter->tx_itr_setting;
drivers/net/ethernet/intel/igb/igb_main.c
View file @
258daca2
...
@@ -338,14 +338,13 @@ static void igb_dump(struct igb_adapter *adapter)
...
@@ -338,14 +338,13 @@ static void igb_dump(struct igb_adapter *adapter)
struct
net_device
*
netdev
=
adapter
->
netdev
;
struct
net_device
*
netdev
=
adapter
->
netdev
;
struct
e1000_hw
*
hw
=
&
adapter
->
hw
;
struct
e1000_hw
*
hw
=
&
adapter
->
hw
;
struct
igb_reg_info
*
reginfo
;
struct
igb_reg_info
*
reginfo
;
int
n
=
0
;
struct
igb_ring
*
tx_ring
;
struct
igb_ring
*
tx_ring
;
union
e1000_adv_tx_desc
*
tx_desc
;
union
e1000_adv_tx_desc
*
tx_desc
;
struct
my_u0
{
u64
a
;
u64
b
;
}
*
u0
;
struct
my_u0
{
u64
a
;
u64
b
;
}
*
u0
;
struct
igb_ring
*
rx_ring
;
struct
igb_ring
*
rx_ring
;
union
e1000_adv_rx_desc
*
rx_desc
;
union
e1000_adv_rx_desc
*
rx_desc
;
u32
staterr
;
u32
staterr
;
int
i
=
0
;
u16
i
,
n
;
if
(
!
netif_msg_hw
(
adapter
))
if
(
!
netif_msg_hw
(
adapter
))
return
;
return
;
...
@@ -687,8 +686,18 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
...
@@ -687,8 +686,18 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
{
{
struct
igb_ring
*
ring
;
struct
igb_ring
*
ring
;
int
i
;
int
i
;
int
orig_node
=
adapter
->
node
;
for
(
i
=
0
;
i
<
adapter
->
num_tx_queues
;
i
++
)
{
for
(
i
=
0
;
i
<
adapter
->
num_tx_queues
;
i
++
)
{
if
(
orig_node
==
-
1
)
{
int
cur_node
=
next_online_node
(
adapter
->
node
);
if
(
cur_node
==
MAX_NUMNODES
)
cur_node
=
first_online_node
;
adapter
->
node
=
cur_node
;
}
ring
=
kzalloc_node
(
sizeof
(
struct
igb_ring
),
GFP_KERNEL
,
adapter
->
node
);
if
(
!
ring
)
ring
=
kzalloc
(
sizeof
(
struct
igb_ring
),
GFP_KERNEL
);
ring
=
kzalloc
(
sizeof
(
struct
igb_ring
),
GFP_KERNEL
);
if
(
!
ring
)
if
(
!
ring
)
goto
err
;
goto
err
;
...
@@ -696,13 +705,25 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
...
@@ -696,13 +705,25 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
ring
->
queue_index
=
i
;
ring
->
queue_index
=
i
;
ring
->
dev
=
&
adapter
->
pdev
->
dev
;
ring
->
dev
=
&
adapter
->
pdev
->
dev
;
ring
->
netdev
=
adapter
->
netdev
;
ring
->
netdev
=
adapter
->
netdev
;
ring
->
numa_node
=
adapter
->
node
;
/* For 82575, context index must be unique per ring. */
/* For 82575, context index must be unique per ring. */
if
(
adapter
->
hw
.
mac
.
type
==
e1000_82575
)
if
(
adapter
->
hw
.
mac
.
type
==
e1000_82575
)
ring
->
flags
=
IGB_RING_FLAG_TX_CTX_IDX
;
set_bit
(
IGB_RING_FLAG_TX_CTX_IDX
,
&
ring
->
flags
)
;
adapter
->
tx_ring
[
i
]
=
ring
;
adapter
->
tx_ring
[
i
]
=
ring
;
}
}
/* Restore the adapter's original node */
adapter
->
node
=
orig_node
;
for
(
i
=
0
;
i
<
adapter
->
num_rx_queues
;
i
++
)
{
for
(
i
=
0
;
i
<
adapter
->
num_rx_queues
;
i
++
)
{
if
(
orig_node
==
-
1
)
{
int
cur_node
=
next_online_node
(
adapter
->
node
);
if
(
cur_node
==
MAX_NUMNODES
)
cur_node
=
first_online_node
;
adapter
->
node
=
cur_node
;
}
ring
=
kzalloc_node
(
sizeof
(
struct
igb_ring
),
GFP_KERNEL
,
adapter
->
node
);
if
(
!
ring
)
ring
=
kzalloc
(
sizeof
(
struct
igb_ring
),
GFP_KERNEL
);
ring
=
kzalloc
(
sizeof
(
struct
igb_ring
),
GFP_KERNEL
);
if
(
!
ring
)
if
(
!
ring
)
goto
err
;
goto
err
;
...
@@ -710,37 +731,66 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
...
@@ -710,37 +731,66 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
ring
->
queue_index
=
i
;
ring
->
queue_index
=
i
;
ring
->
dev
=
&
adapter
->
pdev
->
dev
;
ring
->
dev
=
&
adapter
->
pdev
->
dev
;
ring
->
netdev
=
adapter
->
netdev
;
ring
->
netdev
=
adapter
->
netdev
;
ring
->
flags
=
IGB_RING_FLAG_RX_CSUM
;
/* enable rx checksum */
ring
->
numa_node
=
adapter
->
node
;
/* set flag indicating ring supports SCTP checksum offload */
/* set flag indicating ring supports SCTP checksum offload */
if
(
adapter
->
hw
.
mac
.
type
>=
e1000_82576
)
if
(
adapter
->
hw
.
mac
.
type
>=
e1000_82576
)
ring
->
flags
|=
IGB_RING_FLAG_RX_SCTP_CSUM
;
set_bit
(
IGB_RING_FLAG_RX_SCTP_CSUM
,
&
ring
->
flags
)
;
adapter
->
rx_ring
[
i
]
=
ring
;
adapter
->
rx_ring
[
i
]
=
ring
;
}
}
/* Restore the adapter's original node */
adapter
->
node
=
orig_node
;
igb_cache_ring_register
(
adapter
);
igb_cache_ring_register
(
adapter
);
return
0
;
return
0
;
err:
err:
/* Restore the adapter's original node */
adapter
->
node
=
orig_node
;
igb_free_queues
(
adapter
);
igb_free_queues
(
adapter
);
return
-
ENOMEM
;
return
-
ENOMEM
;
}
}
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
* @msix_vector: vector number we are allocating to a given ring
* @index: row index of IVAR register to write within IVAR table
* @offset: column offset of in IVAR, should be multiple of 8
*
* This function is intended to handle the writing of the IVAR register
* for adapters 82576 and newer. The IVAR table consists of 2 columns,
* each containing an cause allocation for an Rx and Tx ring, and a
* variable number of rows depending on the number of queues supported.
**/
static
void
igb_write_ivar
(
struct
e1000_hw
*
hw
,
int
msix_vector
,
int
index
,
int
offset
)
{
u32
ivar
=
array_rd32
(
E1000_IVAR0
,
index
);
/* clear any bits that are currently set */
ivar
&=
~
((
u32
)
0xFF
<<
offset
);
/* write vector and valid bit */
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
offset
;
array_wr32
(
E1000_IVAR0
,
index
,
ivar
);
}
#define IGB_N0_QUEUE -1
#define IGB_N0_QUEUE -1
static
void
igb_assign_vector
(
struct
igb_q_vector
*
q_vector
,
int
msix_vector
)
static
void
igb_assign_vector
(
struct
igb_q_vector
*
q_vector
,
int
msix_vector
)
{
{
u32
msixbm
=
0
;
struct
igb_adapter
*
adapter
=
q_vector
->
adapter
;
struct
igb_adapter
*
adapter
=
q_vector
->
adapter
;
struct
e1000_hw
*
hw
=
&
adapter
->
hw
;
struct
e1000_hw
*
hw
=
&
adapter
->
hw
;
u32
ivar
,
index
;
int
rx_queue
=
IGB_N0_QUEUE
;
int
rx_queue
=
IGB_N0_QUEUE
;
int
tx_queue
=
IGB_N0_QUEUE
;
int
tx_queue
=
IGB_N0_QUEUE
;
u32
msixbm
=
0
;
if
(
q_vector
->
rx
_
ring
)
if
(
q_vector
->
rx
.
ring
)
rx_queue
=
q_vector
->
rx
_
ring
->
reg_idx
;
rx_queue
=
q_vector
->
rx
.
ring
->
reg_idx
;
if
(
q_vector
->
tx
_
ring
)
if
(
q_vector
->
tx
.
ring
)
tx_queue
=
q_vector
->
tx
_
ring
->
reg_idx
;
tx_queue
=
q_vector
->
tx
.
ring
->
reg_idx
;
switch
(
hw
->
mac
.
type
)
{
switch
(
hw
->
mac
.
type
)
{
case
e1000_82575
:
case
e1000_82575
:
...
@@ -758,72 +808,39 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
...
@@ -758,72 +808,39 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector
->
eims_value
=
msixbm
;
q_vector
->
eims_value
=
msixbm
;
break
;
break
;
case
e1000_82576
:
case
e1000_82576
:
/* 82576 uses a table-based method for assigning vectors.
/*
Each queue has a single entry in the table to which we write
* 82576 uses a table that essentially consists of 2 columns
a vector number along with a "valid" bit. Sadly, the layout
* with 8 rows. The ordering is column-major so we use the
of the table is somewhat counterintuitive. */
* lower 3 bits as the row index, and the 4th bit as the
if
(
rx_queue
>
IGB_N0_QUEUE
)
{
* column offset.
index
=
(
rx_queue
&
0x7
);
*/
ivar
=
array_rd32
(
E1000_IVAR0
,
index
);
if
(
rx_queue
>
IGB_N0_QUEUE
)
if
(
rx_queue
<
8
)
{
igb_write_ivar
(
hw
,
msix_vector
,
/* vector goes into low byte of register */
rx_queue
&
0x7
,
ivar
=
ivar
&
0xFFFFFF00
;
(
rx_queue
&
0x8
)
<<
1
);
ivar
|=
msix_vector
|
E1000_IVAR_VALID
;
if
(
tx_queue
>
IGB_N0_QUEUE
)
}
else
{
igb_write_ivar
(
hw
,
msix_vector
,
/* vector goes into third byte of register */
tx_queue
&
0x7
,
ivar
=
ivar
&
0xFF00FFFF
;
((
tx_queue
&
0x8
)
<<
1
)
+
8
);
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
16
;
}
array_wr32
(
E1000_IVAR0
,
index
,
ivar
);
}
if
(
tx_queue
>
IGB_N0_QUEUE
)
{
index
=
(
tx_queue
&
0x7
);
ivar
=
array_rd32
(
E1000_IVAR0
,
index
);
if
(
tx_queue
<
8
)
{
/* vector goes into second byte of register */
ivar
=
ivar
&
0xFFFF00FF
;
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
8
;
}
else
{
/* vector goes into high byte of register */
ivar
=
ivar
&
0x00FFFFFF
;
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
24
;
}
array_wr32
(
E1000_IVAR0
,
index
,
ivar
);
}
q_vector
->
eims_value
=
1
<<
msix_vector
;
q_vector
->
eims_value
=
1
<<
msix_vector
;
break
;
break
;
case
e1000_82580
:
case
e1000_82580
:
case
e1000_i350
:
case
e1000_i350
:
/* 82580 uses the same table-based approach as 82576 but has fewer
/*
entries as a result we carry over for queues greater than 4. */
* On 82580 and newer adapters the scheme is similar to 82576
if
(
rx_queue
>
IGB_N0_QUEUE
)
{
* however instead of ordering column-major we have things
index
=
(
rx_queue
>>
1
);
* ordered row-major. So we traverse the table by using
ivar
=
array_rd32
(
E1000_IVAR0
,
index
);
* bit 0 as the column offset, and the remaining bits as the
if
(
rx_queue
&
0x1
)
{
* row index.
/* vector goes into third byte of register */
*/
ivar
=
ivar
&
0xFF00FFFF
;
if
(
rx_queue
>
IGB_N0_QUEUE
)
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
16
;
igb_write_ivar
(
hw
,
msix_vector
,
}
else
{
rx_queue
>>
1
,
/* vector goes into low byte of register */
(
rx_queue
&
0x1
)
<<
4
);
ivar
=
ivar
&
0xFFFFFF00
;
if
(
tx_queue
>
IGB_N0_QUEUE
)
ivar
|=
msix_vector
|
E1000_IVAR_VALID
;
igb_write_ivar
(
hw
,
msix_vector
,
}
tx_queue
>>
1
,
array_wr32
(
E1000_IVAR0
,
index
,
ivar
);
((
tx_queue
&
0x1
)
<<
4
)
+
8
);
}
if
(
tx_queue
>
IGB_N0_QUEUE
)
{
index
=
(
tx_queue
>>
1
);
ivar
=
array_rd32
(
E1000_IVAR0
,
index
);
if
(
tx_queue
&
0x1
)
{
/* vector goes into high byte of register */
ivar
=
ivar
&
0x00FFFFFF
;
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
24
;
}
else
{
/* vector goes into second byte of register */
ivar
=
ivar
&
0xFFFF00FF
;
ivar
|=
(
msix_vector
|
E1000_IVAR_VALID
)
<<
8
;
}
array_wr32
(
E1000_IVAR0
,
index
,
ivar
);
}
q_vector
->
eims_value
=
1
<<
msix_vector
;
q_vector
->
eims_value
=
1
<<
msix_vector
;
break
;
break
;
default:
default:
...
@@ -923,15 +940,15 @@ static int igb_request_msix(struct igb_adapter *adapter)
...
@@ -923,15 +940,15 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector
->
itr_register
=
hw
->
hw_addr
+
E1000_EITR
(
vector
);
q_vector
->
itr_register
=
hw
->
hw_addr
+
E1000_EITR
(
vector
);
if
(
q_vector
->
rx
_ring
&&
q_vector
->
tx_
ring
)
if
(
q_vector
->
rx
.
ring
&&
q_vector
->
tx
.
ring
)
sprintf
(
q_vector
->
name
,
"%s-TxRx-%u"
,
netdev
->
name
,
sprintf
(
q_vector
->
name
,
"%s-TxRx-%u"
,
netdev
->
name
,
q_vector
->
rx_
ring
->
queue_index
);
q_vector
->
rx
.
ring
->
queue_index
);
else
if
(
q_vector
->
tx
_
ring
)
else
if
(
q_vector
->
tx
.
ring
)
sprintf
(
q_vector
->
name
,
"%s-tx-%u"
,
netdev
->
name
,
sprintf
(
q_vector
->
name
,
"%s-tx-%u"
,
netdev
->
name
,
q_vector
->
tx_
ring
->
queue_index
);
q_vector
->
tx
.
ring
->
queue_index
);
else
if
(
q_vector
->
rx
_
ring
)
else
if
(
q_vector
->
rx
.
ring
)
sprintf
(
q_vector
->
name
,
"%s-rx-%u"
,
netdev
->
name
,
sprintf
(
q_vector
->
name
,
"%s-rx-%u"
,
netdev
->
name
,
q_vector
->
rx_
ring
->
queue_index
);
q_vector
->
rx
.
ring
->
queue_index
);
else
else
sprintf
(
q_vector
->
name
,
"%s-unused"
,
netdev
->
name
);
sprintf
(
q_vector
->
name
,
"%s-unused"
,
netdev
->
name
);
...
@@ -1087,9 +1104,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
...
@@ -1087,9 +1104,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
struct
igb_q_vector
*
q_vector
;
struct
igb_q_vector
*
q_vector
;
struct
e1000_hw
*
hw
=
&
adapter
->
hw
;
struct
e1000_hw
*
hw
=
&
adapter
->
hw
;
int
v_idx
;
int
v_idx
;
int
orig_node
=
adapter
->
node
;
for
(
v_idx
=
0
;
v_idx
<
adapter
->
num_q_vectors
;
v_idx
++
)
{
for
(
v_idx
=
0
;
v_idx
<
adapter
->
num_q_vectors
;
v_idx
++
)
{
q_vector
=
kzalloc
(
sizeof
(
struct
igb_q_vector
),
GFP_KERNEL
);
if
((
adapter
->
num_q_vectors
==
(
adapter
->
num_rx_queues
+
adapter
->
num_tx_queues
))
&&
(
adapter
->
num_rx_queues
==
v_idx
))
adapter
->
node
=
orig_node
;
if
(
orig_node
==
-
1
)
{
int
cur_node
=
next_online_node
(
adapter
->
node
);
if
(
cur_node
==
MAX_NUMNODES
)
cur_node
=
first_online_node
;
adapter
->
node
=
cur_node
;
}
q_vector
=
kzalloc_node
(
sizeof
(
struct
igb_q_vector
),
GFP_KERNEL
,
adapter
->
node
);
if
(
!
q_vector
)
q_vector
=
kzalloc
(
sizeof
(
struct
igb_q_vector
),
GFP_KERNEL
);
if
(
!
q_vector
)
if
(
!
q_vector
)
goto
err_out
;
goto
err_out
;
q_vector
->
adapter
=
adapter
;
q_vector
->
adapter
=
adapter
;
...
@@ -1098,9 +1130,14 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
...
@@ -1098,9 +1130,14 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
netif_napi_add
(
adapter
->
netdev
,
&
q_vector
->
napi
,
igb_poll
,
64
);
netif_napi_add
(
adapter
->
netdev
,
&
q_vector
->
napi
,
igb_poll
,
64
);
adapter
->
q_vector
[
v_idx
]
=
q_vector
;
adapter
->
q_vector
[
v_idx
]
=
q_vector
;
}
}
/* Restore the adapter's original node */
adapter
->
node
=
orig_node
;
return
0
;
return
0
;
err_out:
err_out:
/* Restore the adapter's original node */
adapter
->
node
=
orig_node
;
igb_free_q_vectors
(
adapter
);
igb_free_q_vectors
(
adapter
);
return
-
ENOMEM
;
return
-
ENOMEM
;
}
}
...
@@ -1110,8 +1147,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
...
@@ -1110,8 +1147,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
{
{
struct
igb_q_vector
*
q_vector
=
adapter
->
q_vector
[
v_idx
];
struct
igb_q_vector
*
q_vector
=
adapter
->
q_vector
[
v_idx
];
q_vector
->
rx_ring
=
adapter
->
rx_ring
[
ring_idx
];
q_vector
->
rx
.
ring
=
adapter
->
rx_ring
[
ring_idx
];
q_vector
->
rx_ring
->
q_vector
=
q_vector
;
q_vector
->
rx
.
ring
->
q_vector
=
q_vector
;
q_vector
->
rx
.
count
++
;
q_vector
->
itr_val
=
adapter
->
rx_itr_setting
;
q_vector
->
itr_val
=
adapter
->
rx_itr_setting
;
if
(
q_vector
->
itr_val
&&
q_vector
->
itr_val
<=
3
)
if
(
q_vector
->
itr_val
&&
q_vector
->
itr_val
<=
3
)
q_vector
->
itr_val
=
IGB_START_ITR
;
q_vector
->
itr_val
=
IGB_START_ITR
;
...
@@ -1122,10 +1160,11 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
...
@@ -1122,10 +1160,11 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
{
{
struct
igb_q_vector
*
q_vector
=
adapter
->
q_vector
[
v_idx
];
struct
igb_q_vector
*
q_vector
=
adapter
->
q_vector
[
v_idx
];
q_vector
->
tx_ring
=
adapter
->
tx_ring
[
ring_idx
];
q_vector
->
tx
.
ring
=
adapter
->
tx_ring
[
ring_idx
];
q_vector
->
tx_ring
->
q_vector
=
q_vector
;
q_vector
->
tx
.
ring
->
q_vector
=
q_vector
;
q_vector
->
tx
.
count
++
;
q_vector
->
itr_val
=
adapter
->
tx_itr_setting
;
q_vector
->
itr_val
=
adapter
->
tx_itr_setting
;
q_vector
->
tx
_
work_limit
=
adapter
->
tx_work_limit
;
q_vector
->
tx
.
work_limit
=
adapter
->
tx_work_limit
;
if
(
q_vector
->
itr_val
&&
q_vector
->
itr_val
<=
3
)
if
(
q_vector
->
itr_val
&&
q_vector
->
itr_val
<=
3
)
q_vector
->
itr_val
=
IGB_START_ITR
;
q_vector
->
itr_val
=
IGB_START_ITR
;
}
}
...
@@ -1770,17 +1809,8 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
...
@@ -1770,17 +1809,8 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
static
int
igb_set_features
(
struct
net_device
*
netdev
,
u32
features
)
static
int
igb_set_features
(
struct
net_device
*
netdev
,
u32
features
)
{
{
struct
igb_adapter
*
adapter
=
netdev_priv
(
netdev
);
int
i
;
u32
changed
=
netdev
->
features
^
features
;
u32
changed
=
netdev
->
features
^
features
;
for
(
i
=
0
;
i
<
adapter
->
num_rx_queues
;
i
++
)
{
if
(
features
&
NETIF_F_RXCSUM
)
adapter
->
rx_ring
[
i
]
->
flags
|=
IGB_RING_FLAG_RX_CSUM
;
else
adapter
->
rx_ring
[
i
]
->
flags
&=
~
IGB_RING_FLAG_RX_CSUM
;
}
if
(
changed
&
NETIF_F_HW_VLAN_RX
)
if
(
changed
&
NETIF_F_HW_VLAN_RX
)
igb_vlan_mode
(
netdev
,
features
);
igb_vlan_mode
(
netdev
,
features
);
...
@@ -1948,23 +1978,32 @@ static int __devinit igb_probe(struct pci_dev *pdev,
...
@@ -1948,23 +1978,32 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info
(
&
pdev
->
dev
,
dev_info
(
&
pdev
->
dev
,
"PHY reset is blocked due to SOL/IDER session.
\n
"
);
"PHY reset is blocked due to SOL/IDER session.
\n
"
);
netdev
->
hw_features
=
NETIF_F_SG
|
/*
* features is initialized to 0 in allocation, it might have bits
* set by igb_sw_init so we should use an or instead of an
* assignment.
*/
netdev
->
features
|=
NETIF_F_SG
|
NETIF_F_IP_CSUM
|
NETIF_F_IP_CSUM
|
NETIF_F_IPV6_CSUM
|
NETIF_F_IPV6_CSUM
|
NETIF_F_TSO
|
NETIF_F_TSO
|
NETIF_F_TSO6
|
NETIF_F_TSO6
|
NETIF_F_RXHASH
|
NETIF_F_RXCSUM
|
NETIF_F_RXCSUM
|
NETIF_F_HW_VLAN_RX
;
NETIF_F_HW_VLAN_RX
|
NETIF_F_HW_VLAN_TX
;
netdev
->
features
=
netdev
->
hw_features
|
/* copy netdev features into list of user selectable features */
NETIF_F_HW_VLAN_TX
|
netdev
->
hw_features
|=
netdev
->
features
;
NETIF_F_HW_VLAN_FILTER
;
netdev
->
vlan_features
|=
NETIF_F_TSO
;
/* set this bit last since it cannot be part of hw_features */
netdev
->
vlan_features
|=
NETIF_F_TSO6
;
netdev
->
features
|=
NETIF_F_HW_VLAN_FILTER
;
netdev
->
vlan_features
|=
NETIF_F_IP_CSUM
;
netdev
->
vlan_features
|=
NETIF_F_IPV6_CSUM
;
netdev
->
vlan_features
|=
NETIF_F_TSO
|
netdev
->
vlan_features
|=
NETIF_F_SG
;
NETIF_F_TSO6
|
NETIF_F_IP_CSUM
|
NETIF_F_IPV6_CSUM
|
NETIF_F_SG
;
if
(
pci_using_dac
)
{
if
(
pci_using_dac
)
{
netdev
->
features
|=
NETIF_F_HIGHDMA
;
netdev
->
features
|=
NETIF_F_HIGHDMA
;
...
@@ -2082,8 +2121,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
...
@@ -2082,8 +2121,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if
(
err
)
if
(
err
)
goto
err_register
;
goto
err_register
;
igb_vlan_mode
(
netdev
,
netdev
->
features
);
/* carrier off reporting is important to ethtool even BEFORE open */
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off
(
netdev
);
netif_carrier_off
(
netdev
);
...
@@ -2409,6 +2446,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
...
@@ -2409,6 +2446,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
VLAN_HLEN
;
VLAN_HLEN
;
adapter
->
min_frame_size
=
ETH_ZLEN
+
ETH_FCS_LEN
;
adapter
->
min_frame_size
=
ETH_ZLEN
+
ETH_FCS_LEN
;
adapter
->
node
=
-
1
;
spin_lock_init
(
&
adapter
->
stats64_lock
);
spin_lock_init
(
&
adapter
->
stats64_lock
);
#ifdef CONFIG_PCI_IOV
#ifdef CONFIG_PCI_IOV
switch
(
hw
->
mac
.
type
)
{
switch
(
hw
->
mac
.
type
)
{
...
@@ -2579,9 +2618,12 @@ static int igb_close(struct net_device *netdev)
...
@@ -2579,9 +2618,12 @@ static int igb_close(struct net_device *netdev)
int
igb_setup_tx_resources
(
struct
igb_ring
*
tx_ring
)
int
igb_setup_tx_resources
(
struct
igb_ring
*
tx_ring
)
{
{
struct
device
*
dev
=
tx_ring
->
dev
;
struct
device
*
dev
=
tx_ring
->
dev
;
int
orig_node
=
dev_to_node
(
dev
);
int
size
;
int
size
;
size
=
sizeof
(
struct
igb_tx_buffer
)
*
tx_ring
->
count
;
size
=
sizeof
(
struct
igb_tx_buffer
)
*
tx_ring
->
count
;
tx_ring
->
tx_buffer_info
=
vzalloc_node
(
size
,
tx_ring
->
numa_node
);
if
(
!
tx_ring
->
tx_buffer_info
)
tx_ring
->
tx_buffer_info
=
vzalloc
(
size
);
tx_ring
->
tx_buffer_info
=
vzalloc
(
size
);
if
(
!
tx_ring
->
tx_buffer_info
)
if
(
!
tx_ring
->
tx_buffer_info
)
goto
err
;
goto
err
;
...
@@ -2590,6 +2632,13 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
...
@@ -2590,6 +2632,13 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring
->
size
=
tx_ring
->
count
*
sizeof
(
union
e1000_adv_tx_desc
);
tx_ring
->
size
=
tx_ring
->
count
*
sizeof
(
union
e1000_adv_tx_desc
);
tx_ring
->
size
=
ALIGN
(
tx_ring
->
size
,
4096
);
tx_ring
->
size
=
ALIGN
(
tx_ring
->
size
,
4096
);
set_dev_node
(
dev
,
tx_ring
->
numa_node
);
tx_ring
->
desc
=
dma_alloc_coherent
(
dev
,
tx_ring
->
size
,
&
tx_ring
->
dma
,
GFP_KERNEL
);
set_dev_node
(
dev
,
orig_node
);
if
(
!
tx_ring
->
desc
)
tx_ring
->
desc
=
dma_alloc_coherent
(
dev
,
tx_ring
->
desc
=
dma_alloc_coherent
(
dev
,
tx_ring
->
size
,
tx_ring
->
size
,
&
tx_ring
->
dma
,
&
tx_ring
->
dma
,
...
@@ -2600,6 +2649,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
...
@@ -2600,6 +2649,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring
->
next_to_use
=
0
;
tx_ring
->
next_to_use
=
0
;
tx_ring
->
next_to_clean
=
0
;
tx_ring
->
next_to_clean
=
0
;
return
0
;
return
0
;
err:
err:
...
@@ -2722,9 +2772,12 @@ static void igb_configure_tx(struct igb_adapter *adapter)
...
@@ -2722,9 +2772,12 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int
igb_setup_rx_resources
(
struct
igb_ring
*
rx_ring
)
int
igb_setup_rx_resources
(
struct
igb_ring
*
rx_ring
)
{
{
struct
device
*
dev
=
rx_ring
->
dev
;
struct
device
*
dev
=
rx_ring
->
dev
;
int
orig_node
=
dev_to_node
(
dev
);
int
size
,
desc_len
;
int
size
,
desc_len
;
size
=
sizeof
(
struct
igb_rx_buffer
)
*
rx_ring
->
count
;
size
=
sizeof
(
struct
igb_rx_buffer
)
*
rx_ring
->
count
;
rx_ring
->
rx_buffer_info
=
vzalloc_node
(
size
,
rx_ring
->
numa_node
);
if
(
!
rx_ring
->
rx_buffer_info
)
rx_ring
->
rx_buffer_info
=
vzalloc
(
size
);
rx_ring
->
rx_buffer_info
=
vzalloc
(
size
);
if
(
!
rx_ring
->
rx_buffer_info
)
if
(
!
rx_ring
->
rx_buffer_info
)
goto
err
;
goto
err
;
...
@@ -2735,6 +2788,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
...
@@ -2735,6 +2788,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring
->
size
=
rx_ring
->
count
*
desc_len
;
rx_ring
->
size
=
rx_ring
->
count
*
desc_len
;
rx_ring
->
size
=
ALIGN
(
rx_ring
->
size
,
4096
);
rx_ring
->
size
=
ALIGN
(
rx_ring
->
size
,
4096
);
set_dev_node
(
dev
,
rx_ring
->
numa_node
);
rx_ring
->
desc
=
dma_alloc_coherent
(
dev
,
rx_ring
->
size
,
&
rx_ring
->
dma
,
GFP_KERNEL
);
set_dev_node
(
dev
,
orig_node
);
if
(
!
rx_ring
->
desc
)
rx_ring
->
desc
=
dma_alloc_coherent
(
dev
,
rx_ring
->
desc
=
dma_alloc_coherent
(
dev
,
rx_ring
->
size
,
rx_ring
->
size
,
&
rx_ring
->
dma
,
&
rx_ring
->
dma
,
...
@@ -3169,7 +3229,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
...
@@ -3169,7 +3229,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
{
struct
igb_tx_buffer
*
buffer_info
;
struct
igb_tx_buffer
*
buffer_info
;
unsigned
long
size
;
unsigned
long
size
;
u
nsigned
int
i
;
u
16
i
;
if
(
!
tx_ring
->
tx_buffer_info
)
if
(
!
tx_ring
->
tx_buffer_info
)
return
;
return
;
...
@@ -3703,7 +3763,7 @@ static void igb_watchdog_task(struct work_struct *work)
...
@@ -3703,7 +3763,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
}
/* Force detection of hung controller every watchdog period */
/* Force detection of hung controller every watchdog period */
tx_ring
->
detect_tx_hung
=
true
;
set_bit
(
IGB_RING_FLAG_TX_DETECT_HANG
,
&
tx_ring
->
flags
)
;
}
}
/* Cause software interrupt to ensure rx ring is cleaned */
/* Cause software interrupt to ensure rx ring is cleaned */
...
@@ -3754,33 +3814,24 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
...
@@ -3754,33 +3814,24 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
int
new_val
=
q_vector
->
itr_val
;
int
new_val
=
q_vector
->
itr_val
;
int
avg_wire_size
=
0
;
int
avg_wire_size
=
0
;
struct
igb_adapter
*
adapter
=
q_vector
->
adapter
;
struct
igb_adapter
*
adapter
=
q_vector
->
adapter
;
struct
igb_ring
*
ring
;
unsigned
int
packets
;
unsigned
int
packets
;
/* For non-gigabit speeds, just fix the interrupt rate at 4000
/* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks.
* ints/sec - ITR timer value of 120 ticks.
*/
*/
if
(
adapter
->
link_speed
!=
SPEED_1000
)
{
if
(
adapter
->
link_speed
!=
SPEED_1000
)
{
new_val
=
976
;
new_val
=
IGB_4K_ITR
;
goto
set_itr_val
;
goto
set_itr_val
;
}
}
ring
=
q_vector
->
rx_ring
;
packets
=
q_vector
->
rx
.
total_packets
;
if
(
ring
)
{
packets
=
ACCESS_ONCE
(
ring
->
total_packets
);
if
(
packets
)
if
(
packets
)
avg_wire_size
=
ring
->
total_bytes
/
packets
;
avg_wire_size
=
q_vector
->
rx
.
total_bytes
/
packets
;
}
ring
=
q_vector
->
tx_ring
;
if
(
ring
)
{
packets
=
ACCESS_ONCE
(
ring
->
total_packets
);
packets
=
q_vector
->
tx
.
total_packets
;
if
(
packets
)
if
(
packets
)
avg_wire_size
=
max_t
(
u32
,
avg_wire_size
,
avg_wire_size
=
max_t
(
u32
,
avg_wire_size
,
ring
->
total_bytes
/
packets
);
q_vector
->
tx
.
total_bytes
/
packets
);
}
/* if avg_wire_size isn't set no work was done */
/* if avg_wire_size isn't set no work was done */
if
(
!
avg_wire_size
)
if
(
!
avg_wire_size
)
...
@@ -3798,9 +3849,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
...
@@ -3798,9 +3849,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
else
else
new_val
=
avg_wire_size
/
2
;
new_val
=
avg_wire_size
/
2
;
/* when in itr mode 3 do not exceed 20K ints/sec */
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if
(
adapter
->
rx_itr_setting
==
3
&&
new_val
<
196
)
if
(
new_val
<
IGB_20K_ITR
&&
new_val
=
196
;
((
q_vector
->
rx
.
ring
&&
adapter
->
rx_itr_setting
==
3
)
||
(
!
q_vector
->
rx
.
ring
&&
adapter
->
tx_itr_setting
==
3
)))
new_val
=
IGB_20K_ITR
;
set_itr_val:
set_itr_val:
if
(
new_val
!=
q_vector
->
itr_val
)
{
if
(
new_val
!=
q_vector
->
itr_val
)
{
...
@@ -3808,14 +3861,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
...
@@ -3808,14 +3861,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
q_vector
->
set_itr
=
1
;
q_vector
->
set_itr
=
1
;
}
}
clear_counts:
clear_counts:
if
(
q_vector
->
rx_ring
)
{
q_vector
->
rx
.
total_bytes
=
0
;
q_vector
->
rx_ring
->
total_bytes
=
0
;
q_vector
->
rx
.
total_packets
=
0
;
q_vector
->
rx_ring
->
total_packets
=
0
;
q_vector
->
tx
.
total_bytes
=
0
;
}
q_vector
->
tx
.
total_packets
=
0
;
if
(
q_vector
->
tx_ring
)
{
q_vector
->
tx_ring
->
total_bytes
=
0
;
q_vector
->
tx_ring
->
total_packets
=
0
;
}
}
}
/**
/**
...
@@ -3831,106 +3880,102 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
...
@@ -3831,106 +3880,102 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
* parameter (see igb_param.c)
* parameter (see igb_param.c)
* NOTE: These calculations are only valid when operating in a single-
* NOTE: These calculations are only valid when operating in a single-
* queue environment.
* queue environment.
* @adapter: pointer to adapter
* @q_vector: pointer to q_vector
* @itr_setting: current q_vector->itr_val
* @ring_container: ring info to update the itr for
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
**/
**/
static
unsigned
int
igb_update_itr
(
struct
igb_adapter
*
adapter
,
u16
itr_setting
,
static
void
igb_update_itr
(
struct
igb_q_vector
*
q_vector
,
int
packets
,
int
bytes
)
struct
igb_ring_container
*
ring_container
)
{
{
unsigned
int
retval
=
itr_setting
;
unsigned
int
packets
=
ring_container
->
total_packets
;
unsigned
int
bytes
=
ring_container
->
total_bytes
;
u8
itrval
=
ring_container
->
itr
;
/* no packets, exit with status unchanged */
if
(
packets
==
0
)
if
(
packets
==
0
)
goto
update_itr_done
;
return
;
switch
(
itr
_setting
)
{
switch
(
itr
val
)
{
case
lowest_latency
:
case
lowest_latency
:
/* handle TSO and jumbo frames */
/* handle TSO and jumbo frames */
if
(
bytes
/
packets
>
8000
)
if
(
bytes
/
packets
>
8000
)
ret
val
=
bulk_latency
;
itr
val
=
bulk_latency
;
else
if
((
packets
<
5
)
&&
(
bytes
>
512
))
else
if
((
packets
<
5
)
&&
(
bytes
>
512
))
ret
val
=
low_latency
;
itr
val
=
low_latency
;
break
;
break
;
case
low_latency
:
/* 50 usec aka 20000 ints/s */
case
low_latency
:
/* 50 usec aka 20000 ints/s */
if
(
bytes
>
10000
)
{
if
(
bytes
>
10000
)
{
/* this if handles the TSO accounting */
/* this if handles the TSO accounting */
if
(
bytes
/
packets
>
8000
)
{
if
(
bytes
/
packets
>
8000
)
{
ret
val
=
bulk_latency
;
itr
val
=
bulk_latency
;
}
else
if
((
packets
<
10
)
||
((
bytes
/
packets
)
>
1200
))
{
}
else
if
((
packets
<
10
)
||
((
bytes
/
packets
)
>
1200
))
{
ret
val
=
bulk_latency
;
itr
val
=
bulk_latency
;
}
else
if
((
packets
>
35
))
{
}
else
if
((
packets
>
35
))
{
ret
val
=
lowest_latency
;
itr
val
=
lowest_latency
;
}
}
}
else
if
(
bytes
/
packets
>
2000
)
{
}
else
if
(
bytes
/
packets
>
2000
)
{
ret
val
=
bulk_latency
;
itr
val
=
bulk_latency
;
}
else
if
(
packets
<=
2
&&
bytes
<
512
)
{
}
else
if
(
packets
<=
2
&&
bytes
<
512
)
{
ret
val
=
lowest_latency
;
itr
val
=
lowest_latency
;
}
}
break
;
break
;
case
bulk_latency
:
/* 250 usec aka 4000 ints/s */
case
bulk_latency
:
/* 250 usec aka 4000 ints/s */
if
(
bytes
>
25000
)
{
if
(
bytes
>
25000
)
{
if
(
packets
>
35
)
if
(
packets
>
35
)
ret
val
=
low_latency
;
itr
val
=
low_latency
;
}
else
if
(
bytes
<
1500
)
{
}
else
if
(
bytes
<
1500
)
{
ret
val
=
low_latency
;
itr
val
=
low_latency
;
}
}
break
;
break
;
}
}
update_itr_done:
/* clear work counters since we have the values we need */
return
retval
;
ring_container
->
total_bytes
=
0
;
ring_container
->
total_packets
=
0
;
/* write updated itr to ring container */
ring_container
->
itr
=
itrval
;
}
}
static
void
igb_set_itr
(
struct
igb_
adapter
*
adapte
r
)
static
void
igb_set_itr
(
struct
igb_
q_vector
*
q_vecto
r
)
{
{
struct
igb_q_vector
*
q_vector
=
adapter
->
q_vector
[
0
];
struct
igb_adapter
*
adapter
=
q_vector
->
adapter
;
u16
current_itr
;
u32
new_itr
=
q_vector
->
itr_val
;
u32
new_itr
=
q_vector
->
itr_val
;
u8
current_itr
=
0
;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if
(
adapter
->
link_speed
!=
SPEED_1000
)
{
if
(
adapter
->
link_speed
!=
SPEED_1000
)
{
current_itr
=
0
;
current_itr
=
0
;
new_itr
=
4000
;
new_itr
=
IGB_4K_ITR
;
goto
set_itr_now
;
goto
set_itr_now
;
}
}
adapter
->
rx_itr
=
igb_update_itr
(
adapter
,
igb_update_itr
(
q_vector
,
&
q_vector
->
tx
);
adapter
->
rx_itr
,
igb_update_itr
(
q_vector
,
&
q_vector
->
rx
);
q_vector
->
rx_ring
->
total_packets
,
q_vector
->
rx_ring
->
total_bytes
);
adapter
->
tx_itr
=
igb_update_itr
(
adapter
,
current_itr
=
max
(
q_vector
->
rx
.
itr
,
q_vector
->
tx
.
itr
);
adapter
->
tx_itr
,
q_vector
->
tx_ring
->
total_packets
,
q_vector
->
tx_ring
->
total_bytes
);
current_itr
=
max
(
adapter
->
rx_itr
,
adapter
->
tx_itr
);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if
(
adapter
->
rx_itr_setting
==
3
&&
current_itr
==
lowest_latency
)
if
(
current_itr
==
lowest_latency
&&
((
q_vector
->
rx
.
ring
&&
adapter
->
rx_itr_setting
==
3
)
||
(
!
q_vector
->
rx
.
ring
&&
adapter
->
tx_itr_setting
==
3
)))
current_itr
=
low_latency
;
current_itr
=
low_latency
;
switch
(
current_itr
)
{
switch
(
current_itr
)
{
/* counts and packets in update_itr are dependent on these numbers */
/* counts and packets in update_itr are dependent on these numbers */
case
lowest_latency
:
case
lowest_latency
:
new_itr
=
56
;
/* aka
70,000 ints/sec */
new_itr
=
IGB_70K_ITR
;
/*
70,000 ints/sec */
break
;
break
;
case
low_latency
:
case
low_latency
:
new_itr
=
196
;
/* aka
20,000 ints/sec */
new_itr
=
IGB_20K_ITR
;
/*
20,000 ints/sec */
break
;
break
;
case
bulk_latency
:
case
bulk_latency
:
new_itr
=
980
;
/* aka
4,000 ints/sec */
new_itr
=
IGB_4K_ITR
;
/*
4,000 ints/sec */
break
;
break
;
default:
default:
break
;
break
;
}
}
set_itr_now:
set_itr_now:
q_vector
->
rx_ring
->
total_bytes
=
0
;
q_vector
->
rx_ring
->
total_packets
=
0
;
q_vector
->
tx_ring
->
total_bytes
=
0
;
q_vector
->
tx_ring
->
total_packets
=
0
;
if
(
new_itr
!=
q_vector
->
itr_val
)
{
if
(
new_itr
!=
q_vector
->
itr_val
)
{
/* this attempts to bias the interrupt rate towards Bulk
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* by adding intermediate steps when interrupt rate is
...
@@ -3966,7 +4011,7 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
...
@@ -3966,7 +4011,7 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
type_tucmd
|=
E1000_TXD_CMD_DEXT
|
E1000_ADVTXD_DTYP_CTXT
;
type_tucmd
|=
E1000_TXD_CMD_DEXT
|
E1000_ADVTXD_DTYP_CTXT
;
/* For 82575, context index must be unique per ring. */
/* For 82575, context index must be unique per ring. */
if
(
t
x_ring
->
flags
&
IGB_RING_FLAG_TX_CTX_IDX
)
if
(
t
est_bit
(
IGB_RING_FLAG_TX_CTX_IDX
,
&
tx_ring
->
flags
)
)
mss_l4len_idx
|=
tx_ring
->
reg_idx
<<
4
;
mss_l4len_idx
|=
tx_ring
->
reg_idx
<<
4
;
context_desc
->
vlan_macip_lens
=
cpu_to_le32
(
vlan_macip_lens
);
context_desc
->
vlan_macip_lens
=
cpu_to_le32
(
vlan_macip_lens
);
...
@@ -3975,10 +4020,11 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
...
@@ -3975,10 +4020,11 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
context_desc
->
mss_l4len_idx
=
cpu_to_le32
(
mss_l4len_idx
);
context_desc
->
mss_l4len_idx
=
cpu_to_le32
(
mss_l4len_idx
);
}
}
static
inline
int
igb_tso
(
struct
igb_ring
*
tx_ring
,
struct
sk_buff
*
skb
,
static
int
igb_tso
(
struct
igb_ring
*
tx_ring
,
u32
tx_flags
,
__be16
protocol
,
u8
*
hdr_len
)
struct
igb_tx_buffer
*
first
,
u8
*
hdr_len
)
{
{
int
err
;
struct
sk_buff
*
skb
=
first
->
skb
;
u32
vlan_macip_lens
,
type_tucmd
;
u32
vlan_macip_lens
,
type_tucmd
;
u32
mss_l4len_idx
,
l4len
;
u32
mss_l4len_idx
,
l4len
;
...
@@ -3986,7 +4032,7 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -3986,7 +4032,7 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
return
0
;
return
0
;
if
(
skb_header_cloned
(
skb
))
{
if
(
skb_header_cloned
(
skb
))
{
err
=
pskb_expand_head
(
skb
,
0
,
0
,
GFP_ATOMIC
);
int
err
=
pskb_expand_head
(
skb
,
0
,
0
,
GFP_ATOMIC
);
if
(
err
)
if
(
err
)
return
err
;
return
err
;
}
}
...
@@ -3994,7 +4040,7 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -3994,7 +4040,7 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd
=
E1000_ADVTXD_TUCMD_L4T_TCP
;
type_tucmd
=
E1000_ADVTXD_TUCMD_L4T_TCP
;
if
(
protocol
==
__constant_htons
(
ETH_P_IP
))
{
if
(
first
->
protocol
==
__constant_htons
(
ETH_P_IP
))
{
struct
iphdr
*
iph
=
ip_hdr
(
skb
);
struct
iphdr
*
iph
=
ip_hdr
(
skb
);
iph
->
tot_len
=
0
;
iph
->
tot_len
=
0
;
iph
->
check
=
0
;
iph
->
check
=
0
;
...
@@ -4003,16 +4049,26 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4003,16 +4049,26 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
IPPROTO_TCP
,
IPPROTO_TCP
,
0
);
0
);
type_tucmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
type_tucmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
first
->
tx_flags
|=
IGB_TX_FLAGS_TSO
|
IGB_TX_FLAGS_CSUM
|
IGB_TX_FLAGS_IPV4
;
}
else
if
(
skb_is_gso_v6
(
skb
))
{
}
else
if
(
skb_is_gso_v6
(
skb
))
{
ipv6_hdr
(
skb
)
->
payload_len
=
0
;
ipv6_hdr
(
skb
)
->
payload_len
=
0
;
tcp_hdr
(
skb
)
->
check
=
~
csum_ipv6_magic
(
&
ipv6_hdr
(
skb
)
->
saddr
,
tcp_hdr
(
skb
)
->
check
=
~
csum_ipv6_magic
(
&
ipv6_hdr
(
skb
)
->
saddr
,
&
ipv6_hdr
(
skb
)
->
daddr
,
&
ipv6_hdr
(
skb
)
->
daddr
,
0
,
IPPROTO_TCP
,
0
);
0
,
IPPROTO_TCP
,
0
);
first
->
tx_flags
|=
IGB_TX_FLAGS_TSO
|
IGB_TX_FLAGS_CSUM
;
}
}
/* compute header lengths */
l4len
=
tcp_hdrlen
(
skb
);
l4len
=
tcp_hdrlen
(
skb
);
*
hdr_len
=
skb_transport_offset
(
skb
)
+
l4len
;
*
hdr_len
=
skb_transport_offset
(
skb
)
+
l4len
;
/* update gso size and bytecount with header size */
first
->
gso_segs
=
skb_shinfo
(
skb
)
->
gso_segs
;
first
->
bytecount
+=
(
first
->
gso_segs
-
1
)
*
*
hdr_len
;
/* MSS L4LEN IDX */
/* MSS L4LEN IDX */
mss_l4len_idx
=
l4len
<<
E1000_ADVTXD_L4LEN_SHIFT
;
mss_l4len_idx
=
l4len
<<
E1000_ADVTXD_L4LEN_SHIFT
;
mss_l4len_idx
|=
skb_shinfo
(
skb
)
->
gso_size
<<
E1000_ADVTXD_MSS_SHIFT
;
mss_l4len_idx
|=
skb_shinfo
(
skb
)
->
gso_size
<<
E1000_ADVTXD_MSS_SHIFT
;
...
@@ -4020,26 +4076,26 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4020,26 +4076,26 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
/* VLAN MACLEN IPLEN */
/* VLAN MACLEN IPLEN */
vlan_macip_lens
=
skb_network_header_len
(
skb
);
vlan_macip_lens
=
skb_network_header_len
(
skb
);
vlan_macip_lens
|=
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
;
vlan_macip_lens
|=
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
;
vlan_macip_lens
|=
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
;
vlan_macip_lens
|=
first
->
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
;
igb_tx_ctxtdesc
(
tx_ring
,
vlan_macip_lens
,
type_tucmd
,
mss_l4len_idx
);
igb_tx_ctxtdesc
(
tx_ring
,
vlan_macip_lens
,
type_tucmd
,
mss_l4len_idx
);
return
1
;
return
1
;
}
}
static
inline
bool
igb_tx_csum
(
struct
igb_ring
*
tx_ring
,
struct
sk_buff
*
skb
,
static
void
igb_tx_csum
(
struct
igb_ring
*
tx_ring
,
struct
igb_tx_buffer
*
first
)
u32
tx_flags
,
__be16
protocol
)
{
{
struct
sk_buff
*
skb
=
first
->
skb
;
u32
vlan_macip_lens
=
0
;
u32
vlan_macip_lens
=
0
;
u32
mss_l4len_idx
=
0
;
u32
mss_l4len_idx
=
0
;
u32
type_tucmd
=
0
;
u32
type_tucmd
=
0
;
if
(
skb
->
ip_summed
!=
CHECKSUM_PARTIAL
)
{
if
(
skb
->
ip_summed
!=
CHECKSUM_PARTIAL
)
{
if
(
!
(
tx_flags
&
IGB_TX_FLAGS_VLAN
))
if
(
!
(
first
->
tx_flags
&
IGB_TX_FLAGS_VLAN
))
return
false
;
return
;
}
else
{
}
else
{
u8
l4_hdr
=
0
;
u8
l4_hdr
=
0
;
switch
(
protocol
)
{
switch
(
first
->
protocol
)
{
case
__constant_htons
(
ETH_P_IP
):
case
__constant_htons
(
ETH_P_IP
):
vlan_macip_lens
|=
skb_network_header_len
(
skb
);
vlan_macip_lens
|=
skb_network_header_len
(
skb
);
type_tucmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
type_tucmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
...
@@ -4053,7 +4109,7 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4053,7 +4109,7 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
if
(
unlikely
(
net_ratelimit
()))
{
if
(
unlikely
(
net_ratelimit
()))
{
dev_warn
(
tx_ring
->
dev
,
dev_warn
(
tx_ring
->
dev
,
"partial checksum but proto=%x!
\n
"
,
"partial checksum but proto=%x!
\n
"
,
protocol
);
first
->
protocol
);
}
}
break
;
break
;
}
}
...
@@ -4081,14 +4137,15 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4081,14 +4137,15 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
}
}
break
;
break
;
}
}
/* update TX checksum flag */
first
->
tx_flags
|=
IGB_TX_FLAGS_CSUM
;
}
}
vlan_macip_lens
|=
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
;
vlan_macip_lens
|=
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
;
vlan_macip_lens
|=
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
;
vlan_macip_lens
|=
first
->
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
;
igb_tx_ctxtdesc
(
tx_ring
,
vlan_macip_lens
,
type_tucmd
,
mss_l4len_idx
);
igb_tx_ctxtdesc
(
tx_ring
,
vlan_macip_lens
,
type_tucmd
,
mss_l4len_idx
);
return
(
skb
->
ip_summed
==
CHECKSUM_PARTIAL
);
}
}
static
__le32
igb_tx_cmd_type
(
u32
tx_flags
)
static
__le32
igb_tx_cmd_type
(
u32
tx_flags
)
...
@@ -4113,14 +4170,15 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
...
@@ -4113,14 +4170,15 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
return
cmd_type
;
return
cmd_type
;
}
}
static
__le32
igb_tx_olinfo_status
(
u32
tx_flags
,
unsigned
int
paylen
,
static
void
igb_tx_olinfo_status
(
struct
igb_ring
*
tx_ring
,
struct
igb_ring
*
tx_ring
)
union
e1000_adv_tx_desc
*
tx_desc
,
u32
tx_flags
,
unsigned
int
paylen
)
{
{
u32
olinfo_status
=
paylen
<<
E1000_ADVTXD_PAYLEN_SHIFT
;
u32
olinfo_status
=
paylen
<<
E1000_ADVTXD_PAYLEN_SHIFT
;
/* 82575 requires a unique index per ring if any offload is enabled */
/* 82575 requires a unique index per ring if any offload is enabled */
if
((
tx_flags
&
(
IGB_TX_FLAGS_CSUM
|
IGB_TX_FLAGS_VLAN
))
&&
if
((
tx_flags
&
(
IGB_TX_FLAGS_CSUM
|
IGB_TX_FLAGS_VLAN
))
&&
(
tx_ring
->
flags
&
IGB_RING_FLAG_TX_CTX_IDX
))
test_bit
(
IGB_RING_FLAG_TX_CTX_IDX
,
&
tx_ring
->
flags
))
olinfo_status
|=
tx_ring
->
reg_idx
<<
4
;
olinfo_status
|=
tx_ring
->
reg_idx
<<
4
;
/* insert L4 checksum */
/* insert L4 checksum */
...
@@ -4132,7 +4190,7 @@ static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
...
@@ -4132,7 +4190,7 @@ static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
olinfo_status
|=
E1000_TXD_POPTS_IXSM
<<
8
;
olinfo_status
|=
E1000_TXD_POPTS_IXSM
<<
8
;
}
}
return
cpu_to_le32
(
olinfo_status
);
tx_desc
->
read
.
olinfo_status
=
cpu_to_le32
(
olinfo_status
);
}
}
/*
/*
...
@@ -4140,12 +4198,13 @@ static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
...
@@ -4140,12 +4198,13 @@ static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
* maintain a power of two alignment we have to limit ourselves to 32K.
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
*/
#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD (1
<<
IGB_MAX_TXD_PWR)
#define IGB_MAX_DATA_PER_TXD (1
<<
IGB_MAX_TXD_PWR)
static
void
igb_tx_map
(
struct
igb_ring
*
tx_ring
,
struct
sk_buff
*
skb
,
static
void
igb_tx_map
(
struct
igb_ring
*
tx_ring
,
struct
igb_tx_buffer
*
first
,
u32
tx_flags
,
struct
igb_tx_buffer
*
first
,
const
u8
hdr_len
)
const
u8
hdr_len
)
{
{
struct
sk_buff
*
skb
=
first
->
skb
;
struct
igb_tx_buffer
*
tx_buffer_info
;
struct
igb_tx_buffer
*
tx_buffer_info
;
union
e1000_adv_tx_desc
*
tx_desc
;
union
e1000_adv_tx_desc
*
tx_desc
;
dma_addr_t
dma
;
dma_addr_t
dma
;
...
@@ -4154,24 +4213,12 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4154,24 +4213,12 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned
int
size
=
skb_headlen
(
skb
);
unsigned
int
size
=
skb_headlen
(
skb
);
unsigned
int
paylen
=
skb
->
len
-
hdr_len
;
unsigned
int
paylen
=
skb
->
len
-
hdr_len
;
__le32
cmd_type
;
__le32
cmd_type
;
u32
tx_flags
=
first
->
tx_flags
;
u16
i
=
tx_ring
->
next_to_use
;
u16
i
=
tx_ring
->
next_to_use
;
u16
gso_segs
;
if
(
tx_flags
&
IGB_TX_FLAGS_TSO
)
gso_segs
=
skb_shinfo
(
skb
)
->
gso_segs
;
else
gso_segs
=
1
;
/* multiply data chunks by size of headers */
first
->
bytecount
=
paylen
+
(
gso_segs
*
hdr_len
);
first
->
gso_segs
=
gso_segs
;
first
->
skb
=
skb
;
tx_desc
=
IGB_TX_DESC
(
tx_ring
,
i
);
tx_desc
=
IGB_TX_DESC
(
tx_ring
,
i
);
tx_desc
->
read
.
olinfo_status
=
igb_tx_olinfo_status
(
tx_ring
,
tx_desc
,
tx_flags
,
paylen
);
igb_tx_olinfo_status
(
tx_flags
,
paylen
,
tx_ring
);
cmd_type
=
igb_tx_cmd_type
(
tx_flags
);
cmd_type
=
igb_tx_cmd_type
(
tx_flags
);
dma
=
dma_map_single
(
tx_ring
->
dev
,
skb
->
data
,
size
,
DMA_TO_DEVICE
);
dma
=
dma_map_single
(
tx_ring
->
dev
,
skb
->
data
,
size
,
DMA_TO_DEVICE
);
...
@@ -4181,7 +4228,6 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4181,7 +4228,6 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
/* record length, and DMA address */
/* record length, and DMA address */
first
->
length
=
size
;
first
->
length
=
size
;
first
->
dma
=
dma
;
first
->
dma
=
dma
;
first
->
tx_flags
=
tx_flags
;
tx_desc
->
read
.
buffer_addr
=
cpu_to_le64
(
dma
);
tx_desc
->
read
.
buffer_addr
=
cpu_to_le64
(
dma
);
for
(;;)
{
for
(;;)
{
...
@@ -4284,7 +4330,7 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
...
@@ -4284,7 +4330,7 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
tx_ring
->
next_to_use
=
i
;
tx_ring
->
next_to_use
=
i
;
}
}
static
int
__igb_maybe_stop_tx
(
struct
igb_ring
*
tx_ring
,
int
size
)
static
int
__igb_maybe_stop_tx
(
struct
igb_ring
*
tx_ring
,
const
u16
size
)
{
{
struct
net_device
*
netdev
=
tx_ring
->
netdev
;
struct
net_device
*
netdev
=
tx_ring
->
netdev
;
...
@@ -4310,7 +4356,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
...
@@ -4310,7 +4356,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
return
0
;
return
0
;
}
}
static
inline
int
igb_maybe_stop_tx
(
struct
igb_ring
*
tx_ring
,
int
size
)
static
inline
int
igb_maybe_stop_tx
(
struct
igb_ring
*
tx_ring
,
const
u16
size
)
{
{
if
(
igb_desc_unused
(
tx_ring
)
>=
size
)
if
(
igb_desc_unused
(
tx_ring
)
>=
size
)
return
0
;
return
0
;
...
@@ -4336,6 +4382,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
...
@@ -4336,6 +4382,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return
NETDEV_TX_BUSY
;
return
NETDEV_TX_BUSY
;
}
}
/* record the location of the first descriptor for this packet */
first
=
&
tx_ring
->
tx_buffer_info
[
tx_ring
->
next_to_use
];
first
->
skb
=
skb
;
first
->
bytecount
=
skb
->
len
;
first
->
gso_segs
=
1
;
if
(
unlikely
(
skb_shinfo
(
skb
)
->
tx_flags
&
SKBTX_HW_TSTAMP
))
{
if
(
unlikely
(
skb_shinfo
(
skb
)
->
tx_flags
&
SKBTX_HW_TSTAMP
))
{
skb_shinfo
(
skb
)
->
tx_flags
|=
SKBTX_IN_PROGRESS
;
skb_shinfo
(
skb
)
->
tx_flags
|=
SKBTX_IN_PROGRESS
;
tx_flags
|=
IGB_TX_FLAGS_TSTAMP
;
tx_flags
|=
IGB_TX_FLAGS_TSTAMP
;
...
@@ -4346,22 +4398,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
...
@@ -4346,22 +4398,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
tx_flags
|=
(
vlan_tx_tag_get
(
skb
)
<<
IGB_TX_FLAGS_VLAN_SHIFT
);
tx_flags
|=
(
vlan_tx_tag_get
(
skb
)
<<
IGB_TX_FLAGS_VLAN_SHIFT
);
}
}
/* record the location of the first descriptor for this packet */
/* record initial flags and protocol */
first
=
&
tx_ring
->
tx_buffer_info
[
tx_ring
->
next_to_use
];
first
->
tx_flags
=
tx_flags
;
first
->
protocol
=
protocol
;
tso
=
igb_tso
(
tx_ring
,
skb
,
tx_flags
,
protocol
,
&
hdr_len
);
tso
=
igb_tso
(
tx_ring
,
first
,
&
hdr_len
);
if
(
tso
<
0
)
{
if
(
tso
<
0
)
goto
out_drop
;
goto
out_drop
;
}
else
if
(
tso
)
{
else
if
(
!
tso
)
tx_flags
|=
IGB_TX_FLAGS_TSO
|
IGB_TX_FLAGS_CSUM
;
igb_tx_csum
(
tx_ring
,
first
);
if
(
protocol
==
htons
(
ETH_P_IP
))
tx_flags
|=
IGB_TX_FLAGS_IPV4
;
}
else
if
(
igb_tx_csum
(
tx_ring
,
skb
,
tx_flags
,
protocol
)
&&
(
skb
->
ip_summed
==
CHECKSUM_PARTIAL
))
{
tx_flags
|=
IGB_TX_FLAGS_CSUM
;
}
igb_tx_map
(
tx_ring
,
skb
,
first
,
tx_flags
,
hdr_len
);
igb_tx_map
(
tx_ring
,
first
,
hdr_len
);
/* Make sure there is space in the ring for the next send. */
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx
(
tx_ring
,
MAX_SKB_FRAGS
+
4
);
igb_maybe_stop_tx
(
tx_ring
,
MAX_SKB_FRAGS
+
4
);
...
@@ -4369,7 +4416,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
...
@@ -4369,7 +4416,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return
NETDEV_TX_OK
;
return
NETDEV_TX_OK
;
out_drop:
out_drop:
dev_kfree_skb_any
(
skb
);
igb_unmap_and_free_tx_resource
(
tx_ring
,
first
);
return
NETDEV_TX_OK
;
return
NETDEV_TX_OK
;
}
}
...
@@ -4755,7 +4803,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
...
@@ -4755,7 +4803,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
if
(
adapter
->
hw
.
mac
.
type
==
e1000_82575
)
if
(
adapter
->
hw
.
mac
.
type
==
e1000_82575
)
itr_val
|=
itr_val
<<
16
;
itr_val
|=
itr_val
<<
16
;
else
else
itr_val
|=
0x8000000
;
itr_val
|=
E1000_EITR_CNT_IGNR
;
writel
(
itr_val
,
q_vector
->
itr_register
);
writel
(
itr_val
,
q_vector
->
itr_register
);
q_vector
->
set_itr
=
0
;
q_vector
->
set_itr
=
0
;
...
@@ -4783,8 +4831,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
 	if (q_vector->cpu == cpu)
 		goto out_no_update;
 
-	if (q_vector->tx_ring) {
-		int q = q_vector->tx_ring->reg_idx;
+	if (q_vector->tx.ring) {
+		int q = q_vector->tx.ring->reg_idx;
 		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
 		if (hw->mac.type == e1000_82575) {
 			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
...
@@ -4797,8 +4845,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
 		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
 		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
 	}
-	if (q_vector->rx_ring) {
-		int q = q_vector->rx_ring->reg_idx;
+	if (q_vector->rx.ring) {
+		int q = q_vector->rx.ring->reg_idx;
 		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
 		if (hw->mac.type == e1000_82575) {
 			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
...
@@ -5079,7 +5127,6 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 			}
 			adapter->vf_data[vf].vlans_enabled++;
-			return 0;
 		}
 	} else {
 		if (i < E1000_VLVF_ARRAY_SIZE) {
...
@@ -5442,16 +5489,14 @@ static irqreturn_t igb_intr(int irq, void *data)
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
 	 * need for the IMC write */
 	u32 icr = rd32(E1000_ICR);
-	if (!icr)
-		return IRQ_NONE;  /* Not our interrupt */
-
-	igb_write_itr(q_vector);
 
 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 	 * not set, then the adapter didn't send an interrupt */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
+	igb_write_itr(q_vector);
+
 	if (icr & E1000_ICR_DRSTA)
 		schedule_work(&adapter->reset_task);
...
@@ -5472,15 +5517,15 @@ static irqreturn_t igb_intr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
 static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 
-	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
-	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
-		if (!adapter->msix_entries)
-			igb_set_itr(adapter);
+	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
+			igb_set_itr(q_vector);
 		else
 			igb_update_ring_itr(q_vector);
 	}
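
The replaced condition changes when the adapter-wide moderation path is taken: instead of keying off the absence of MSI-X entries, the new code asks whether there is exactly one queue vector and no VFs, in which case a single interrupt effectively serves the whole port. A tiny sketch of that predicate (illustrative only; the helper name is made up):

#include <stdbool.h>

/* hypothetical helper, not driver code */
static bool use_adapter_wide_itr(unsigned int num_q_vectors, bool have_vf_data)
{
        /* one vector and no VFs: one shared interrupt-throttle setting is enough */
        return (num_q_vectors == 1) && !have_vf_data;
}
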
...
@@ -5509,10 +5554,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
 		igb_update_dca(q_vector);
 #endif
-	if (q_vector->tx_ring)
+	if (q_vector->tx.ring)
 		clean_complete = igb_clean_tx_irq(q_vector);
 
-	if (q_vector->rx_ring)
+	if (q_vector->rx.ring)
 		clean_complete &= igb_clean_rx_irq(q_vector, budget);
 
 	/* If all work not completed, return budget and keep polling */
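
For context, igb_poll() follows the usual NAPI contract: each queue attached to the vector reports whether it finished within the budget, and only when all of them are done may the vector leave polling mode. A generic sketch of that pattern (not the driver's code; the interrupt re-enable step is elided):

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
        bool clean_complete = true;

        /* ... clean the Tx ring, then the Rx ring, &='ing their results ... */

        if (!clean_complete)
                return budget;          /* work left: stay in polling mode */

        napi_complete(napi);            /* done: the driver re-enables the IRQ */
        return 0;
}
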
...
@@ -5592,11 +5637,11 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	struct igb_ring *tx_ring = q_vector->tx_ring;
+	struct igb_ring *tx_ring = q_vector->tx.ring;
 	struct igb_tx_buffer *tx_buffer;
 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	unsigned int budget = q_vector->tx_work_limit;
+	unsigned int budget = q_vector->tx.work_limit;
 	unsigned int i = tx_ring->next_to_clean;
 
 	if (test_bit(__IGB_DOWN, &adapter->state))
...
@@ -5682,17 +5727,17 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
 	u64_stats_update_end(&tx_ring->tx_syncp);
-	tx_ring->total_bytes += total_bytes;
-	tx_ring->total_packets += total_packets;
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
 
-	if (tx_ring->detect_tx_hung) {
+	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
 		struct e1000_hw *hw = &adapter->hw;
 
 		eop_desc = tx_buffer->next_to_watch;
 
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
-		tx_ring->detect_tx_hung = false;
+		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
 		if (eop_desc &&
 		    time_after(jiffies, tx_buffer->time_stamp +
 			       (adapter->tx_timeout_factor * HZ)) &&
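
The hang-detection flag moves from a dedicated bool in the ring (detect_tx_hung) to one bit in ring->flags, manipulated with the kernel's generic bitops. A small illustration of the one-shot idiom, using the flag word and bit name visible above (the arming side is presumably the watchdog path, which is not part of this excerpt):

#include <linux/bitops.h>
#include <linux/types.h>

/* illustrative only, not driver code */
static void example_arm_hang_check(unsigned long *flags)
{
        set_bit(IGB_RING_FLAG_TX_DETECT_HANG, flags);   /* producer side */
}

static bool example_consume_hang_check(unsigned long *flags)
{
        if (!test_bit(IGB_RING_FLAG_TX_DETECT_HANG, flags))
                return false;
        clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, flags); /* one-shot, as above */
        return true;                                    /* caller runs the check */
}
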
...
@@ -5751,25 +5796,30 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 }
 
 static inline void igb_rx_checksum(struct igb_ring *ring,
-				   u32 status_err, struct sk_buff *skb)
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
 
-	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
-	     (status_err & E1000_RXD_STAT_IXSM))
+	/* Ignore Checksum bit is set */
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+		return;
+
+	/* Rx checksum disabled via ethtool */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
 		return;
 
 	/* TCP/UDP checksum error bit is set */
-	if (status_err &
-	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+	if (igb_test_staterr(rx_desc,
+			     E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
 		/*
 		 * work around errata with sctp packets where the TCPE aka
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
 		 * packets, (aka let the stack check the crc32c)
 		 */
-		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+		if (!((skb->len == 60) &&
+		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
 			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
 			u64_stats_update_end(&ring->rx_syncp);
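
igb_test_staterr() is the helper this series leans on throughout the Rx path: instead of byte-swapping the whole status/error word up front (the old staterr local), each check masks the little-endian descriptor field directly. Its definition is not part of the hunks shown here, but it is presumably along these lines (sketch; union e1000_adv_rx_desc comes from the driver's hardware headers):

/* sketch of the helper the code above assumes, not taken from this diff */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
                                      const u32 stat_err_bits)
{
        /* mask in descriptor (little-endian) byte order; non-zero means "set" */
        return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
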
...
@@ -5778,19 +5828,34 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
 		return;
 	}
 	/* It must be a TCP or UDP packet with a valid checksum */
-	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+	if (igb_test_staterr(rx_desc,
+			     E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
+	dev_dbg(ring->dev, "cksum success: bits %08X\n",
+		le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
+static inline void igb_rx_hash(struct igb_ring *ring,
+			       union e1000_adv_rx_desc *rx_desc,
+			       struct sk_buff *skb)
+{
+	if (ring->netdev->features & NETIF_F_RXHASH)
+		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
+			    union e1000_adv_rx_desc *rx_desc,
 			    struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u64 regval;
 
+	if (!igb_test_staterr(rx_desc,
+			      E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+		return;
+
 	/*
 	 * If this bit is set, then the RX registers contain the time stamp. No
 	 * other packet will be time stamped until we read these registers, so
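
The newly added igb_rx_hash() only does anything when the netdev advertises NETIF_F_RXHASH; the stack can then reuse the hardware RSS hash instead of computing one in software, for example for flow steering. A sketch of the consumer side on kernels of this vintage (illustrative, not driver code):

#include <linux/skbuff.h>

static inline u32 example_flow_hash(const struct sk_buff *skb)
{
        /* skb->rxhash is the u32 that igb_rx_hash() above fills in;
         * zero simply means no hardware hash was recorded */
        return skb->rxhash;
}
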
...
@@ -5802,7 +5867,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 	 * If nothing went wrong, then it should have a shared tx_flags that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
-	if (staterr & E1000_RXDADV_STAT_TSIP) {
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
 		u32 *stamp = (u32 *)skb->data;
 		regval = le32_to_cpu(*(stamp + 2));
 		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
...
@@ -5832,18 +5897,16 @@ static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
 static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 {
-	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct igb_ring *rx_ring = q_vector->rx.ring;
 	union e1000_adv_rx_desc *rx_desc;
 	const int current_node = numa_node_id();
 	unsigned int total_bytes = 0, total_packets = 0;
-	u32 staterr;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
 	u16 i = rx_ring->next_to_clean;
 
 	rx_desc = IGB_RX_DESC(rx_ring, i);
-	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-	while (staterr & E1000_RXD_STAT_DD) {
+	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 		struct sk_buff *skb = buffer_info->skb;
 		union e1000_adv_rx_desc *next_rxd;
...
@@ -5896,7 +5959,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 			buffer_info->page_dma = 0;
 		}
 
-		if (!(staterr & E1000_RXD_STAT_EOP)) {
+		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
 			struct igb_rx_buffer *next_buffer;
 			next_buffer = &rx_ring->rx_buffer_info[i];
 			buffer_info->skb = next_buffer->skb;
...
@@ -5906,25 +5969,27 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 			goto next_desc;
 		}
 
-		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+		if (igb_test_staterr(rx_desc, E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
 			dev_kfree_skb_any(skb);
 			goto next_desc;
 		}
 
-		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
-			igb_rx_hwtstamp(q_vector, staterr, skb);
-
-		total_bytes += skb->len;
-		total_packets++;
-
-		igb_rx_checksum(rx_ring, staterr, skb);
-
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		igb_rx_hwtstamp(q_vector, rx_desc, skb);
+		igb_rx_hash(rx_ring, rx_desc, skb);
+		igb_rx_checksum(rx_ring, rx_desc, skb);
 
-		if (staterr & E1000_RXD_STAT_VP) {
+		if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
 			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
 			__vlan_hwaccel_put_tag(skb, vid);
 		}
+
+		total_bytes += skb->len;
+		total_packets++;
+
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
 		napi_gro_receive(&q_vector->napi, skb);
 
 		budget--;
...
@@ -5941,7 +6006,6 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 		/* use prefetched values */
 		rx_desc = next_rxd;
-		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
 	rx_ring->next_to_clean = i;
...
@@ -5949,8 +6013,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
 	u64_stats_update_end(&rx_ring->rx_syncp);
-	rx_ring->total_packets += total_packets;
-	rx_ring->total_bytes += total_bytes;
+	q_vector->rx.total_packets += total_packets;
+	q_vector->rx.total_bytes += total_bytes;
 
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
...
@@ -6336,10 +6400,9 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl;
+	bool enable = !!(features & NETIF_F_HW_VLAN_RX);
 
-	igb_irq_disable(adapter);
-
-	if (features & NETIF_F_HW_VLAN_RX) {
+	if (enable) {
 		/* enable VLAN tag insert/strip */
 		ctrl = rd32(E1000_CTRL);
 		ctrl |= E1000_CTRL_VME;
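
The new bool enable = !!(features & NETIF_F_HW_VLAN_RX) uses the usual double-negation idiom: a bit test yields the raw masked value, and !! collapses it to 0 or 1 before it is stored in a bool. A standalone illustration of the idiom (names are made up):

#include <stdbool.h>
#include <stdint.h>

static bool flag_set(uint32_t features, uint32_t flag)
{
        return !!(features & flag);     /* always 0 or 1, never e.g. 0x100 */
}
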
...
@@ -6357,9 +6420,6 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
 	}
 
 	igb_rlpml_set(adapter);
-
-	if (!test_bit(__IGB_DOWN, &adapter->state))
-		igb_irq_enable(adapter);
 }
 
 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
...
@@ -6384,11 +6444,6 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	int pf_id = adapter->vfs_allocated_count;
 	s32 err;
 
-	igb_irq_disable(adapter);
-
-	if (!test_bit(__IGB_DOWN, &adapter->state))
-		igb_irq_enable(adapter);
-
 	/* remove vlan from VLVF table array */
 	err = igb_vlvf_set(adapter, vid, false, pf_id);
...
@@ -6403,6 +6458,8 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 {
 	u16 vid;
 
+	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
 		igb_vlan_rx_add_vid(adapter->netdev, vid);
 }
...