Kirill Smelkov / linux / Commits

Commit 21e84257, authored Jun 23, 2011 by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6

Parents: d18cd551 a38a104d
Showing 6 changed files with 850 additions and 456 deletions (+850 -456):

    drivers/net/ixgbe/ixgbe.h           +23  -6
    drivers/net/ixgbe/ixgbe_82599.c     +346 -257
    drivers/net/ixgbe/ixgbe_dcb_nl.c    +6   -7
    drivers/net/ixgbe/ixgbe_ethtool.c   +401 -143
    drivers/net/ixgbe/ixgbe_main.c      +63  -26
    drivers/net/ixgbe/ixgbe_type.h      +11  -17
drivers/net/ixgbe/ixgbe.h
@@ -482,6 +482,17 @@ struct ixgbe_adapter {
     struct vf_macvlans vf_mvs;
     struct vf_macvlans *mv_list;
     bool antispoofing_enabled;
+
+    struct hlist_head fdir_filter_list;
+    union ixgbe_atr_input fdir_mask;
+    int fdir_filter_count;
 };
 
+struct ixgbe_fdir_filter {
+    struct hlist_node fdir_node;
+    union ixgbe_atr_input filter;
+    u16 sw_idx;
+    u16 action;
+};
+
 enum ixbge_state_t {
@@ -543,16 +554,22 @@ extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                                  union ixgbe_atr_hash_dword input,
                                                  union ixgbe_atr_hash_dword common,
                                                  u8 queue);
-extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                               union ixgbe_atr_input *input,
-                                               struct ixgbe_atr_input_masks *input_masks,
-                                               u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                           union ixgbe_atr_input *input_mask);
+extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                                 union ixgbe_atr_input *input,
+                                                 u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                                 union ixgbe_atr_input *input,
+                                                 u16 soft_id);
+extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                                 union ixgbe_atr_input *mask);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
drivers/net/ixgbe/ixgbe_82599.c
@@ -1107,115 +1107,87 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
  * @hw: pointer to hardware structure
  * @pballoc: which mode to allocate filters with
  **/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
+static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
 {
-    u32 fdirctrl = 0;
+    u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
+    u32 current_rxpbsize = 0;
     int i;
 
-    /* Send interrupt when 64 filters are left */
-    fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
-    /* Set the maximum length per hash bucket to 0xA filters */
-    fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
-
     /* reserve space for Flow Director filters */
     switch (pballoc) {
-    case IXGBE_FDIR_PBALLOC_64K:
-        /* 8k - 1 signature filters */
-        fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+    case IXGBE_FDIR_PBALLOC_256K:
+        fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
         break;
     case IXGBE_FDIR_PBALLOC_128K:
-        /* 16k - 1 signature filters */
-        fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+        fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
         break;
-    case IXGBE_FDIR_PBALLOC_256K:
-        /* 32k - 1 signature filters */
-        fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+    case IXGBE_FDIR_PBALLOC_64K:
+        fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
         break;
+    case IXGBE_FDIR_PBALLOC_NONE:
     default:
         /* bad value */
-        return IXGBE_ERR_CONFIG;
+        return IXGBE_ERR_PARAM;
     }
 
-    /* Move the flexible bytes to use the ethertype - shift 6 words */
-    fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+    /* determine current RX packet buffer size */
+    for (i = 0; i < 8; i++)
+        current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 
-    /* Prime the keys for hashing */
-    IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
-    IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+    /* if there is already room for the filters do nothing */
+    if (current_rxpbsize <= fdir_pbsize)
+        return 0;
 
-    /*
-     * Poll init-done after we write the register.  Estimated times:
-     *      10G: PBALLOC = 11b, timing is 60us
-     *       1G: PBALLOC = 11b, timing is 600us
-     *     100M: PBALLOC = 11b, timing is 6ms
-     *
-     *     Multiple these timings by 4 if under full Rx load
-     *
-     * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
-     * 1 msec per poll time.  If we're at line rate and drop to 100M, then
-     * this might not finish in our poll time, but we can live with that
-     * for now.
-     */
-    IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-    IXGBE_WRITE_FLUSH(hw);
-    for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-        if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                           IXGBE_FDIRCTRL_INIT_DONE)
-            break;
-        usleep_range(1000, 2000);
+    if (current_rxpbsize > hw->mac.rx_pb_size) {
+        /*
+         * if rxpbsize is greater than max then HW max the Rx buffer
+         * sizes are unconfigured or misconfigured since HW default is
+         * to give the full buffer to each traffic class resulting in
+         * the total size being buffer size 8x actual size
+         *
+         * This assumes no DCB since the RXPBSIZE registers appear to
+         * be unconfigured.
+         */
+        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize);
+        for (i = 1; i < 8; i++)
+            IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+    } else {
+        /*
+         * Since the Rx packet buffer appears to have already been
+         * configured we need to shrink each packet buffer by enough
+         * to make room for the filters.  As such we take each rxpbsize
+         * value and multiply it by a fraction representing the size
+         * needed over the size we currently have.
+         *
+         * We need to reduce fdir_pbsize and current_rxpbsize to
+         * 1/1024 of their original values in order to avoid
+         * overflowing the u32 being used to store rxpbsize.
+         */
+        fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
+        current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
+        for (i = 0; i < 8; i++) {
+            u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
+            rxpbsize *= fdir_pbsize;
+            rxpbsize /= current_rxpbsize;
+            IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+        }
     }
 
-    if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-        hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
-
     return 0;
 }
 
 /**
- * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
  * @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
+ * @fdirctrl: value to write to flow director control register
  **/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 {
-    u32 fdirctrl = 0;
     int i;
 
-    /* Send interrupt when 64 filters are left */
-    fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
-    /* Initialize the drop queue to Rx queue 127 */
-    fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
-
-    switch (pballoc) {
-    case IXGBE_FDIR_PBALLOC_64K:
-        /* 2k - 1 perfect filters */
-        fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
-        break;
-    case IXGBE_FDIR_PBALLOC_128K:
-        /* 4k - 1 perfect filters */
-        fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
-        break;
-    case IXGBE_FDIR_PBALLOC_256K:
-        /* 8k - 1 perfect filters */
-        fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
-        break;
-    default:
-        /* bad value */
-        return IXGBE_ERR_CONFIG;
-    }
-
-    /* Turn perfect match filtering on */
-    fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
-    fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
-    /* Move the flexible bytes to use the ethertype - shift 6 words */
-    fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
     /* Prime the keys for hashing */
     IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
     IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
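The proportional shrink in ixgbe_set_fdir_rxpba_82599 above is plain integer scaling: each RXPBSIZE register is multiplied by fdir_pbsize/current_rxpbsize, with both operands pre-shifted right by IXGBE_RXPBSIZE_SHIFT so the u32 product cannot overflow. A minimal standalone sketch of that arithmetic, with hypothetical buffer values standing in for the IXGBE_RXPBSIZE(i) registers:

    #include <stdint.h>
    #include <stdio.h>

    #define RXPBSIZE_SHIFT 10  /* stand-in for IXGBE_RXPBSIZE_SHIFT */

    /* Scale eight packet-buffer sizes down so their sum fits fdir_pbsize;
     * buf[] plays the role of the eight RXPBSIZE registers. */
    static void shrink_rxpb(uint32_t buf[8], uint32_t fdir_pbsize,
                            uint32_t current_rxpbsize)
    {
        int i;

        /* reduce both to 1/1024 of their values to avoid u32 overflow */
        fdir_pbsize >>= RXPBSIZE_SHIFT;
        current_rxpbsize >>= RXPBSIZE_SHIFT;

        for (i = 0; i < 8; i++) {
            uint32_t rxpbsize = buf[i];
            rxpbsize *= fdir_pbsize;        /* small operands, no overflow */
            rxpbsize /= current_rxpbsize;
            buf[i] = rxpbsize;
        }
    }

    int main(void)
    {
        /* hypothetical: 8 x 64KB buffers, need to free half the space */
        uint32_t buf[8] = { 65536, 65536, 65536, 65536,
                            65536, 65536, 65536, 65536 };
        shrink_rxpb(buf, 262144, 524288);
        for (int i = 0; i < 8; i++)
            printf("buf[%d] = %u\n", i, buf[i]);  /* each buffer halved */
        return 0;
    }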
@@ -1233,10 +1205,6 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
      * this might not finish in our poll time, but we can live with that
      * for now.
      */
-
-    /* Set the maximum length per hash bucket to 0xA filters */
-    fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
-
     IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
     IXGBE_WRITE_FLUSH(hw);
     for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
@@ -1245,101 +1213,77 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
             break;
         usleep_range(1000, 2000);
     }
-    if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-        hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
-
-    return 0;
+    if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+        hw_dbg(hw, "Flow Director poll time exceeded!\n");
 }
 
 /**
- * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
- * @stream: input bitstream to compute the hash on
- * @key: 32-bit hash key
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ *            contains just the value of the Rx packet buffer allocation
  **/
-static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
-                                        u32 key)
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 {
-    /*
-     * The algorithm is as follows:
-     *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
-     *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
-     *    and A[n] x B[n] is bitwise AND between same length strings
-     *
-     *    K[n] is 16 bits, defined as:
-     *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
-     *       for n modulo 32 < 15, K[n] =
-     *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
-     *
-     *    S[n] is 16 bits, defined as:
-     *       for n >= 15, S[n] = S[n:n - 15]
-     *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
-     *
-     *    To simplify for programming, the algorithm is implemented
-     *    in software this way:
-     *
-     *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
-     *
-     *    for (i = 0; i < 352; i+=32)
-     *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
-     *
-     *    lo_hash_dword[15:0]  ^= Stream[15:0];
-     *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
-     *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
-     *
-     *    hi_hash_dword[31:0]  ^= Stream[351:320];
-     *
-     *    if (key[0])
-     *        hash[15:0] ^= Stream[15:0];
-     *
-     *    for (i = 0; i < 16; i++) {
-     *        if (key[i])
-     *            hash[15:0] ^= lo_hash_dword[(i+15):i];
-     *        if (key[i + 16])
-     *            hash[15:0] ^= hi_hash_dword[(i+15):i];
-     *    }
-     */
-    __be32 common_hash_dword = 0;
-    u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-    u32 hash_result = 0;
-    u8 i;
+    s32 err;
 
-    /* record the flow_vm_vlan bits as they are a key part to the hash */
-    flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
+    /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+    err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+    if (err)
+        return err;
 
-    /* generate common hash dword */
-    for (i = 10; i; i -= 2)
-        common_hash_dword ^= atr_input->dword_stream[i] ^
-                             atr_input->dword_stream[i - 1];
+    /*
+     * Continue setup of fdirctrl register bits:
+     *  Move the flexible bytes to use the ethertype - shift 6 words
+     *  Set the maximum length per hash bucket to 0xA filters
+     *  Send interrupt when 64 filters are left
+     */
+    fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+                (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+                (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
 
-    hi_hash_dword = ntohl(common_hash_dword);
+    /* write hashes and fdirctrl register, poll for completion */
+    ixgbe_fdir_enable_82599(hw, fdirctrl);
 
-    /* low dword is word swapped version of common */
-    lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+    return 0;
+}
 
-    /* apply flow ID/VM pool/VLAN ID bits to hash words */
-    hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ *            contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+    s32 err;
 
-    /* Process bits 0 and 16 */
-    if (key & 0x0001)
-        hash_result ^= lo_hash_dword;
-    if (key & 0x00010000)
-        hash_result ^= hi_hash_dword;
+    /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+    err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+    if (err)
+        return err;
 
     /*
-     * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
-     * delay this because bit 0 of the stream should not be processed
-     * so we do not add the vlan until after bit 0 was processed
+     * Continue setup of fdirctrl register bits:
+     *  Turn perfect match filtering on
+     *  Report hash in RSS field of Rx wb descriptor
+     *  Initialize the drop queue
+     *  Move the flexible bytes to use the ethertype - shift 6 words
+     *  Set the maximum length per hash bucket to 0xA filters
+     *  Send interrupt when 64 (0x4 * 16) filters are left
      */
-    lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+    fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+                IXGBE_FDIRCTRL_REPORT_STATUS |
+                (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+                (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+                (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+                (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
 
-    /* process the remaining 30 bits in the key 2 bits at a time */
-    for (i = 15; i; i--) {
-        if (key & (0x0001 << i))
-            hash_result ^= lo_hash_dword >> i;
-        if (key & (0x00010000 << i))
-            hash_result ^= hi_hash_dword >> i;
-    }
+    /* write hashes and fdirctrl register, poll for completion */
+    ixgbe_fdir_enable_82599(hw, fdirctrl);
 
-    return hash_result & IXGBE_ATR_HASH_MASK;
+    return 0;
 }
 
 /*
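The long comment deleted above (from the removed ixgbe_atr_compute_hash_82599) documents the software reduction of the 352-bit ATR hash: XOR the eleven 32-bit stream words into one common dword, derive a word-swapped low dword, then conditionally XOR shifted copies into the result for each set key bit. A self-contained sketch of that reduction, mirroring the deleted helper rather than any surviving kernel API (ATR_HASH_MASK is a stand-in for IXGBE_ATR_HASH_MASK):

    #include <stdint.h>
    #include <arpa/inet.h>  /* ntohl */

    #define ATR_HASH_MASK 0x7fff  /* stand-in for IXGBE_ATR_HASH_MASK */

    /* Software ATR hash over an 11-dword big-endian input stream, as in the
     * comment removed above; dword_stream[0] carries flow/VM-pool/VLAN bits. */
    static uint32_t atr_compute_hash(const uint32_t dword_stream[11], uint32_t key)
    {
        uint32_t common = 0, hi, lo, flow_vm_vlan, hash = 0;
        int i;

        flow_vm_vlan = ntohl(dword_stream[0]);

        /* generate common hash dword from stream words 1..10 */
        for (i = 10; i; i -= 2)
            common ^= dword_stream[i] ^ dword_stream[i - 1];
        hi = ntohl(common);

        /* low dword is word swapped version of common */
        lo = (hi >> 16) | (hi << 16);

        /* apply flow ID/VM pool/VLAN ID bits to the high hash word */
        hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

        /* bits 0 and 16 first, before the VLAN bits touch the low dword */
        if (key & 0x0001)
            hash ^= lo;
        if (key & 0x00010000)
            hash ^= hi;

        lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

        /* process the remaining 30 key bits, two per iteration */
        for (i = 15; i; i--) {
            if (key & (0x0001 << i))
                hash ^= lo >> i;
            if (key & (0x00010000 << i))
                hash ^= hi >> i;
        }

        return hash & ATR_HASH_MASK;
    }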
@@ -1476,7 +1420,6 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
      */
     fdirhashcmd = (u64)fdircmd << 32;
     fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
     IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
-
     hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
@@ -1484,6 +1427,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
     return 0;
 }
 
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+    u32 n = (_n); \
+    if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+        bucket_hash ^= lo_hash_dword >> n; \
+    if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+        bucket_hash ^= hi_hash_dword >> n; \
+} while (0);
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @atr_input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes.  First it applys the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream.  This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                          union ixgbe_atr_input *input_mask)
+{
+    u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+    u32 bucket_hash = 0;
+
+    /* Apply masks to input data */
+    input->dword_stream[0]  &= input_mask->dword_stream[0];
+    input->dword_stream[1]  &= input_mask->dword_stream[1];
+    input->dword_stream[2]  &= input_mask->dword_stream[2];
+    input->dword_stream[3]  &= input_mask->dword_stream[3];
+    input->dword_stream[4]  &= input_mask->dword_stream[4];
+    input->dword_stream[5]  &= input_mask->dword_stream[5];
+    input->dword_stream[6]  &= input_mask->dword_stream[6];
+    input->dword_stream[7]  &= input_mask->dword_stream[7];
+    input->dword_stream[8]  &= input_mask->dword_stream[8];
+    input->dword_stream[9]  &= input_mask->dword_stream[9];
+    input->dword_stream[10] &= input_mask->dword_stream[10];
+
+    /* record the flow_vm_vlan bits as they are a key part to the hash */
+    flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+    /* generate common hash dword */
+    hi_hash_dword = ntohl(input->dword_stream[1] ^
+                          input->dword_stream[2] ^
+                          input->dword_stream[3] ^
+                          input->dword_stream[4] ^
+                          input->dword_stream[5] ^
+                          input->dword_stream[6] ^
+                          input->dword_stream[7] ^
+                          input->dword_stream[8] ^
+                          input->dword_stream[9] ^
+                          input->dword_stream[10]);
+
+    /* low dword is word swapped version of common */
+    lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+    /* apply flow ID/VM pool/VLAN ID bits to hash words */
+    hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+    /* Process bits 0 and 16 */
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+    /*
+     * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+     * delay this because bit 0 of the stream should not be processed
+     * so we do not add the vlan until after bit 0 was processed
+     */
+    lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+    /* Process remaining 30 bit of the key */
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+    IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+    /*
+     * Limit hash to 13 bits since max bucket count is 8K.
+     * Store result at the end of the input stream.
+     */
+    input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
 /**
  * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
  * @input_mask: mask to be bit swapped
@@ -1493,11 +1531,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
  * generate a correctly swapped value we need to bit swap the mask and that
  * is what is accomplished by this function.
  **/
-static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
 {
-    u32 mask = ntohs(input_masks->dst_port_mask);
+    u32 mask = ntohs(input_mask->formatted.dst_port);
+
     mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
-    mask |= ntohs(input_masks->src_port_mask);
+    mask |= ntohs(input_mask->formatted.src_port);
     mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
     mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
     mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
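The mask built in ixgbe_get_fdirtcpm_82599 is bit swapped with the classic mask-and-shift reversal; the hunk above shows the first three steps (adjacent bits, pairs, nibbles) and the collapsed context presumably continues with bytes and 16-bit halves in the same pattern. A standalone demonstration of the full 32-bit reversal under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Reverse the bits of a 32-bit value by swapping ever larger groups:
     * adjacent bits, 2-bit pairs, nibbles, bytes, then 16-bit halves. */
    static uint32_t bit_reverse32(uint32_t mask)
    {
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
        mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
        mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
        mask = ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
        return (mask << 16) | (mask >> 16);
    }

    int main(void)
    {
        printf("%08x\n", bit_reverse32(0x00000001));  /* 80000000 */
        printf("%08x\n", bit_reverse32(0x0000FFFF));  /* ffff0000 */
        return 0;
    }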
@@ -1519,52 +1557,14 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
     IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
 
 #define IXGBE_STORE_AS_BE16(_value) \
-    (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+    ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
 
-/**
- * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- * @hw: pointer to hardware structure
- * @input: input bitstream
- * @input_masks: bitwise masks for relevant fields
- * @soft_id: software index into the silicon hash tables for filter storage
- * @queue: queue index to direct traffic to
- *
- * Note that the caller to this function must lock before calling, since the
- * hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                        union ixgbe_atr_input *input,
-                                        struct ixgbe_atr_input_masks *input_masks,
-                                        u16 soft_id, u8 queue)
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                    union ixgbe_atr_input *input_mask)
 {
-    u32 fdirhash;
-    u32 fdircmd;
-    u32 fdirport, fdirtcpm;
-    u32 fdirvlan;
-    /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
-    u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
-                IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
-
-    /*
-     * Check flow_type formatting, and bail out before we touch the hardware
-     * if there's a configuration issue
-     */
-    switch (input->formatted.flow_type) {
-    case IXGBE_ATR_FLOW_TYPE_IPV4:
-        /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
-        fdirm |= IXGBE_FDIRM_L4P;
-    case IXGBE_ATR_FLOW_TYPE_SCTPV4:
-        if (input_masks->dst_port_mask || input_masks->src_port_mask) {
-            hw_dbg(hw, " Error on src/dst port mask\n");
-            return IXGBE_ERR_CONFIG;
-        }
-    case IXGBE_ATR_FLOW_TYPE_TCPV4:
-    case IXGBE_ATR_FLOW_TYPE_UDPV4:
-        break;
-    default:
-        hw_dbg(hw, " Error on flow type input\n");
-        return IXGBE_ERR_CONFIG;
-    }
+    /* mask IPv6 since it is currently not supported */
+    u32 fdirm = IXGBE_FDIRM_DIPv6;
+    u32 fdirtcpm;
 
     /*
      * Program the relevant mask registers.  If src/dst_port or src/dst_addr
@@ -1576,41 +1576,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
      * point in time.
      */
 
-    /* Program FDIRM */
-    switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
-    case 0xEFFF:
-        /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
-        fdirm &= ~IXGBE_FDIRM_VLANID;
-    case 0xE000:
-        /* Unmask VLAN prio - bit 1 */
-        fdirm &= ~IXGBE_FDIRM_VLANP;
+    /* verify bucket hash is cleared on hash generation */
+    if (input_mask->formatted.bkt_hash)
+        hw_dbg(hw, " bucket hash should always be 0 in mask\n");
+
+    /* Program FDIRM and verify partial masks */
+    switch (input_mask->formatted.vm_pool & 0x7F) {
+    case 0x0:
+        fdirm |= IXGBE_FDIRM_POOL;
+    case 0x7F:
         break;
-    case 0x0FFF:
-        /* Unmask VLAN ID - bit 0 */
-        fdirm &= ~IXGBE_FDIRM_VLANID;
+    default:
+        hw_dbg(hw, " Error on vm pool mask\n");
+        return IXGBE_ERR_CONFIG;
+    }
+
+    switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+    case 0x0:
+        fdirm |= IXGBE_FDIRM_L4P;
+        if (input_mask->formatted.dst_port ||
+            input_mask->formatted.src_port) {
+            hw_dbg(hw, " Error on src/dst port mask\n");
+            return IXGBE_ERR_CONFIG;
+        }
+    case IXGBE_ATR_L4TYPE_MASK:
         break;
+    default:
+        hw_dbg(hw, " Error on flow type mask\n");
+        return IXGBE_ERR_CONFIG;
+    }
+
+    switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
     case 0x0000:
-        /* do nothing, vlans already masked */
+        /* mask VLAN ID, fall through to mask VLAN priority */
+        fdirm |= IXGBE_FDIRM_VLANID;
+    case 0x0FFF:
+        /* mask VLAN priority */
+        fdirm |= IXGBE_FDIRM_VLANP;
+        break;
+    case 0xE000:
+        /* mask VLAN ID only, fall through */
+        fdirm |= IXGBE_FDIRM_VLANID;
+    case 0xEFFF:
+        /* no VLAN fields masked */
         break;
     default:
         hw_dbg(hw, " Error on VLAN mask\n");
         return IXGBE_ERR_CONFIG;
     }
 
-    if (input_masks->flex_mask & 0xFFFF) {
-        if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
-            hw_dbg(hw, " Error on flexible byte mask\n");
-            return IXGBE_ERR_CONFIG;
-        }
-        /* Unmask Flex Bytes - bit 4 */
-        fdirm &= ~IXGBE_FDIRM_FLEX;
+    switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+    case 0x0000:
+        /* Mask Flex Bytes, fall through */
+        fdirm |= IXGBE_FDIRM_FLEX;
+    case 0xFFFF:
+        break;
+    default:
+        hw_dbg(hw, " Error on flexible byte mask\n");
+        return IXGBE_ERR_CONFIG;
     }
 
     /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
     IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
     /* store the TCP/UDP port masks, bit reversed from port layout */
-    fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+    fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
 
     /* write both the same so that UDP and TCP use the same mask */
     IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
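The VLAN switch above leans on deliberate case fall-through: a fully-masked VLAN (0x0000 after & 0xEFFF) sets both the VLANID and VLANP mask bits, 0x0FFF sets only VLANP, 0xE000 only VLANID, 0xEFFF neither, and anything else is rejected. A compact standalone check of that mapping (the FDIRM bit values here are illustrative stand-ins for the IXGBE_FDIRM_* defines):

    #include <stdint.h>
    #include <stdio.h>

    #define FDIRM_VLANID 0x1  /* illustrative stand-ins for IXGBE_FDIRM_* */
    #define FDIRM_VLANP  0x2

    /* Map a 16-bit VLAN mask onto FDIRM bits exactly as the switch in
     * ixgbe_fdir_set_input_mask_82599 does, fall-throughs included. */
    static int vlan_mask_to_fdirm(uint16_t vlan_mask, uint32_t *fdirm)
    {
        switch (vlan_mask & 0xEFFF) {
        case 0x0000:
            *fdirm |= FDIRM_VLANID;  /* fall through: also mask priority */
        case 0x0FFF:
            *fdirm |= FDIRM_VLANP;
            break;
        case 0xE000:
            *fdirm |= FDIRM_VLANID;  /* fall through: priority unmasked */
        case 0xEFFF:
            break;
        default:
            return -1;  /* partial masks rejected, as in the driver */
        }
        return 0;
    }

    int main(void)
    {
        uint16_t cases[] = { 0x0000, 0x0FFF, 0xE000, 0xEFFF, 0x00F0 };
        for (int i = 0; i < 5; i++) {
            uint32_t fdirm = 0;
            int err = vlan_mask_to_fdirm(cases[i], &fdirm);
            printf("mask %04x -> fdirm %x err %d\n",
                   cases[i], (unsigned)fdirm, err);
        }
        return 0;
    }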
@@ -1618,24 +1648,32 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
     /* store source and destination IP masks (big-enian) */
     IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
-                         ~input_masks->src_ip_mask[0]);
+                         ~input_mask->formatted.src_ip[0]);
     IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
-                         ~input_masks->dst_ip_mask[0]);
+                         ~input_mask->formatted.dst_ip[0]);
 
-    /* Apply masks to input data */
-    input->formatted.vlan_id &= input_masks->vlan_id_mask;
-    input->formatted.flex_bytes &= input_masks->flex_mask;
-    input->formatted.src_port &= input_masks->src_port_mask;
-    input->formatted.dst_port &= input_masks->dst_port_mask;
-    input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
-    input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+    return 0;
+}
 
-    /* record vlan (little-endian) and flex_bytes(big-endian) */
-    fdirvlan = IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
-    fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
-    fdirvlan |= ntohs(input->formatted.vlan_id);
-    IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                          union ixgbe_atr_input *input,
+                                          u16 soft_id, u8 queue)
+{
+    u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+    /* currently IPv6 is not supported, must be programmed with 0 */
+    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+                         input->formatted.src_ip[0]);
+    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+                         input->formatted.src_ip[1]);
+    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+                         input->formatted.src_ip[2]);
+
+    /* record the source address (big-endian) */
+    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+    /* record the first 32 bits of the destination address (big-endian) */
+    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
 
     /* record source and destination port (little-endian)*/
     fdirport = ntohs(input->formatted.dst_port);
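The IXGBE_STORE_AS_BE16 change earlier in this file folds the ntohs into the macro, so the call site above can pass the __be16 flex_bytes field directly instead of the old STORE_AS_BE16(ntohs(...)) form. Because 16-bit byte swaps commute, the two forms compute the same register value on any endianness; a quick host-side check of that claim:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>  /* htons, ntohs */

    /* plain 16-bit byte swap, the body shared by both macro versions */
    static uint16_t bswap16(uint16_t v)
    {
        return (uint16_t)((v >> 8) | (v << 8));
    }

    int main(void)
    {
        uint16_t flex = htons(0x0800);  /* a __be16 field, e.g. an ethertype */

        uint16_t old_form = bswap16(ntohs(flex));  /* pre-patch call site */
        uint16_t new_form = ntohs(bswap16(flex));  /* ntohs folded into macro */

        /* byte swaps commute, so both orders agree */
        printf("old %04x new %04x\n", old_form, new_form);
        return 0;
    }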
@@ -1643,29 +1681,80 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
     fdirport |= ntohs(input->formatted.src_port);
     IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
 
-    /* record the first 32 bits of the destination address (big-endian) */
-    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+    /* record vlan (little-endian) and flex_bytes(big-endian) */
+    fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+    fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+    fdirvlan |= ntohs(input->formatted.vlan_id);
+    IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
 
-    /* record the source address (big-endian) */
-    IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+    /* configure FDIRHASH register */
+    fdirhash = input->formatted.bkt_hash;
+    fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+    IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+    /*
+     * flush all previous writes to make certain registers are
+     * programmed prior to issuing the command
+     */
+    IXGBE_WRITE_FLUSH(hw);
 
     /* configure FDIRCMD register */
     fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
               IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+    if (queue == IXGBE_FDIR_DROP_QUEUE)
+        fdircmd |= IXGBE_FDIRCMD_DROP;
     fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
     fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+    fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
 
-    /* we only want the bucket hash so drop the upper 16 bits */
-    fdirhash = ixgbe_atr_compute_hash_82599(input,
-                                            IXGBE_ATR_BUCKET_HASH_KEY);
-    fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-
-    IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
     IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
 
     return 0;
 }
 
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                          union ixgbe_atr_input *input,
+                                          u16 soft_id)
+{
+    u32 fdirhash;
+    u32 fdircmd = 0;
+    u32 retry_count;
+    s32 err = 0;
+
+    /* configure FDIRHASH register */
+    fdirhash = input->formatted.bkt_hash;
+    fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+    IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+    /* flush hash to HW */
+    IXGBE_WRITE_FLUSH(hw);
+
+    /* Query if filter is present */
+    IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+    for (retry_count = 10; retry_count; retry_count--) {
+        /* allow 10us for query to process */
+        udelay(10);
+        /* verify query completed successfully */
+        fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+        if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+            break;
+    }
+
+    if (!retry_count)
+        err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+    /* if filter exists in hardware then remove it */
+    if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+        IXGBE_WRITE_FLUSH(hw);
+        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+                        IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+    }
+
+    return err;
+}
+
 /**
  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  * @hw: pointer to hardware structure
drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -114,11 +114,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
     u8 err = 0;
     struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+    /* verify there is something to do, if not then exit */
+    if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+        return err;
+
     if (state > 0) {
         /* Turn on DCB */
-        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-            goto out;
-
         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
             e_err(drv, "Enable failed, needs MSI-X\n");
             err = 1;
@@ -143,9 +144,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
         ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
     } else {
         /* Turn off DCB */
-        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-            goto out;
-
         adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
         adapter->temp_dcb_cfg.pfc_mode_enable = false;
         adapter->dcb_cfg.pfc_mode_enable = false;
@@ -153,7 +151,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
         switch (adapter->hw.mac.type) {
         case ixgbe_mac_82599EB:
         case ixgbe_mac_X540:
-            adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+            if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
             break;
         default:
             break;
drivers/net/ixgbe/ixgbe_ethtool.c
@@ -442,20 +442,67 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
     return 0;
 }
 
+static void ixgbe_do_reset(struct net_device *netdev)
+{
+    struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+    if (netif_running(netdev))
+        ixgbe_reinit_locked(adapter);
+    else
+        ixgbe_reset(adapter);
+}
+
 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
 {
     struct ixgbe_adapter *adapter = netdev_priv(netdev);
     return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
 }
 
+static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
+{
+    int i;
+
+    for (i = 0; i < adapter->num_rx_queues; i++) {
+        struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+            set_ring_rsc_enabled(ring);
+            ixgbe_configure_rscctl(adapter, ring);
+        } else {
+            ixgbe_clear_rscctl(adapter, ring);
+        }
+    }
+}
+
 static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
 {
     struct ixgbe_adapter *adapter = netdev_priv(netdev);
-    if (data)
+    bool need_reset = false;
+
+    if (data) {
         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-    else
+    } else {
         adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
+        if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+            adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+            netdev->features &= ~NETIF_F_LRO;
+        }
+
+        switch (adapter->hw.mac.type) {
+        case ixgbe_mac_X540:
+            ixgbe_set_rsc(adapter);
+            break;
+        case ixgbe_mac_82599EB:
+            need_reset = true;
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (need_reset)
+        ixgbe_do_reset(netdev);
+
     return 0;
 }
@@ -2234,12 +2281,8 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
      * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
      * also locks in RSC enable/disable which requires reset
      */
-    if (need_reset) {
-        if (netif_running(netdev))
-            ixgbe_reinit_locked(adapter);
-        else
-            ixgbe_reset(adapter);
-    }
+    if (need_reset)
+        ixgbe_do_reset(netdev);
 
     return 0;
 }
@@ -2281,25 +2324,12 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
     } else {
         adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
         switch (adapter->hw.mac.type) {
+        case ixgbe_mac_X540:
+            ixgbe_set_rsc(adapter);
+            break;
         case ixgbe_mac_82599EB:
             need_reset = true;
             break;
-        case ixgbe_mac_X540: {
-            int i;
-            for (i = 0; i < adapter->num_rx_queues; i++) {
-                struct ixgbe_ring *ring = adapter->rx_ring[i];
-                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-                    ixgbe_configure_rscctl(adapter, ring);
-                } else {
-                    ixgbe_clear_rscctl(adapter, ring);
-                }
-            }
-        }
-            break;
         default:
             break;
         }
@@ -2310,165 +2340,392 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
      * Check if Flow Director n-tuple support was enabled or disabled.  If
      * the state changed, we need to reset.
      */
-    if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
-        (!(data & ETH_FLAG_NTUPLE))) {
-        /* turn off Flow Director perfect, set hash and reset */
+    if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+        /* turn off ATR, enable perfect filters and reset */
+        if (data & ETH_FLAG_NTUPLE) {
+            adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+            adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+            need_reset = true;
+        }
+    } else if (!(data & ETH_FLAG_NTUPLE)) {
+        /* turn off Flow Director, set ATR and reset */
         adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-        need_reset = true;
-    } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
-               (data & ETH_FLAG_NTUPLE)) {
-        /* turn off Flow Director hash, enable perfect and reset */
-        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-        adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+        if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+            !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+            adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
         need_reset = true;
-    } else {
-        /* no state change */
     }
 
-    if (need_reset) {
-        if (netif_running(netdev))
-            ixgbe_reinit_locked(adapter);
-        else
-            ixgbe_reset(adapter);
-    }
+    if (need_reset)
+        ixgbe_do_reset(netdev);
 
     return 0;
 }
+static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+                                        struct ethtool_rxnfc *cmd)
+{
+    union ixgbe_atr_input *mask = &adapter->fdir_mask;
+    struct ethtool_rx_flow_spec *fsp =
+            (struct ethtool_rx_flow_spec *)&cmd->fs;
+    struct hlist_node *node, *node2;
+    struct ixgbe_fdir_filter *rule = NULL;
+
+    /* report total rule count */
+    cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+    hlist_for_each_entry_safe(rule, node, node2,
+                              &adapter->fdir_filter_list, fdir_node) {
+        if (fsp->location <= rule->sw_idx)
+            break;
+    }
+
+    if (!rule || fsp->location != rule->sw_idx)
+        return -EINVAL;
+
+    /* fill out the flow spec entry */
+
+    /* set flow type field */
+    switch (rule->filter.formatted.flow_type) {
+    case IXGBE_ATR_FLOW_TYPE_TCPV4:
+        fsp->flow_type = TCP_V4_FLOW;
+        break;
+    case IXGBE_ATR_FLOW_TYPE_UDPV4:
+        fsp->flow_type = UDP_V4_FLOW;
+        break;
+    case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+        fsp->flow_type = SCTP_V4_FLOW;
+        break;
+    case IXGBE_ATR_FLOW_TYPE_IPV4:
+        fsp->flow_type = IP_USER_FLOW;
+        fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+        fsp->h_u.usr_ip4_spec.proto = 0;
+        fsp->m_u.usr_ip4_spec.proto = 0;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+    fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+    fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+    fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+    fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+    fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+    fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+    fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+    fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
+    fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
+    fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+    fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+    fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+    fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+    fsp->flow_type |= FLOW_EXT;
+
+    /* record action */
+    if (rule->action == IXGBE_FDIR_DROP_QUEUE)
+        fsp->ring_cookie = RX_CLS_FLOW_DISC;
+    else
+        fsp->ring_cookie = rule->action;
+
+    return 0;
+}
-static int ixgbe_set_rx_ntuple(struct net_device *dev,
-                               struct ethtool_rx_ntuple *cmd)
+static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
+                                      struct ethtool_rxnfc *cmd,
+                                      u32 *rule_locs)
+{
+    struct hlist_node *node, *node2;
+    struct ixgbe_fdir_filter *rule;
+    int cnt = 0;
+
+    /* report total rule count */
+    cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+    hlist_for_each_entry_safe(rule, node, node2,
+                              &adapter->fdir_filter_list, fdir_node) {
+        if (cnt == cmd->rule_cnt)
+            return -EMSGSIZE;
+        rule_locs[cnt] = rule->sw_idx;
+        cnt++;
+    }
+
+    return 0;
+}
+static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                           void *rule_locs)
 {
     struct ixgbe_adapter *adapter = netdev_priv(dev);
-    struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
-    union ixgbe_atr_input input_struct;
-    struct ixgbe_atr_input_masks input_masks;
-    int target_queue;
-    int err;
+    int ret = -EOPNOTSUPP;
 
-    if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-        return -EOPNOTSUPP;
+    switch (cmd->cmd) {
+    case ETHTOOL_GRXRINGS:
+        cmd->data = adapter->num_rx_queues;
+        ret = 0;
+        break;
+    case ETHTOOL_GRXCLSRLCNT:
+        cmd->rule_cnt = adapter->fdir_filter_count;
+        ret = 0;
+        break;
+    case ETHTOOL_GRXCLSRULE:
+        ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
+        break;
+    case ETHTOOL_GRXCLSRLALL:
+        ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
+                                         (u32 *)rule_locs);
+        break;
+    default:
+        break;
+    }
+
+    return ret;
+}
+static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+                                           struct ixgbe_fdir_filter *input,
+                                           u16 sw_idx)
+{
+    struct ixgbe_hw *hw = &adapter->hw;
+    struct hlist_node *node, *node2, *parent;
+    struct ixgbe_fdir_filter *rule;
+    int err = -EINVAL;
+
+    parent = NULL;
+    rule = NULL;
+
+    hlist_for_each_entry_safe(rule, node, node2,
+                              &adapter->fdir_filter_list, fdir_node) {
+        /* hash found, or no matching entry */
+        if (rule->sw_idx >= sw_idx)
+            break;
+        parent = node;
+    }
+
+    /* if there is an old rule occupying our place remove it */
+    if (rule && (rule->sw_idx == sw_idx)) {
+        if (!input || (rule->filter.formatted.bkt_hash !=
+                       input->filter.formatted.bkt_hash)) {
+            err = ixgbe_fdir_erase_perfect_filter_82599(hw,
+                                                        &rule->filter,
+                                                        sw_idx);
+        }
+
+        hlist_del(&rule->fdir_node);
+        kfree(rule);
+        adapter->fdir_filter_count--;
+    }
+
     /*
-     * Don't allow programming if the action is a queue greater than
-     * the number of online Tx queues.
+     * If no input this was a delete, err should be 0 if a rule was
+     * successfully found and removed from the list else -EINVAL
      */
-    if ((fs->action >= adapter->num_tx_queues) ||
-        (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
-        return -EINVAL;
+    if (!input)
+        return err;
 
-    memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
-    memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+    /* initialize node and set software index */
+    INIT_HLIST_NODE(&input->fdir_node);
 
-    /* record flow type */
-    switch (fs->flow_type) {
-    case IPV4_FLOW:
-        input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
-        break;
+    /* add filter to the list */
+    if (parent)
+        hlist_add_after(parent, &input->fdir_node);
+    else
+        hlist_add_head(&input->fdir_node,
+                       &adapter->fdir_filter_list);
+
+    /* update counts */
+    adapter->fdir_filter_count++;
+
+    return 0;
+}
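ixgbe_update_ethtool_fdir_entry keeps fdir_filter_list sorted by sw_idx: it walks until it finds a node at or past the target index, remembers the predecessor, removes any entry already occupying that index, then links the new node after the predecessor (or at the head). The same pattern on a plain singly linked list, outside the kernel's hlist API:

    #include <stdint.h>
    #include <stdlib.h>

    struct filter {
        uint16_t sw_idx;
        struct filter *next;
    };

    /* Insert node into a list kept sorted by sw_idx, replacing any entry
     * that already occupies the same index - the shape of the kernel loop. */
    static void sorted_insert(struct filter **head, struct filter *node)
    {
        struct filter *parent = NULL, *rule = *head;

        while (rule && rule->sw_idx < node->sw_idx) {
            parent = rule;
            rule = rule->next;
        }

        if (rule && rule->sw_idx == node->sw_idx) {
            /* an old rule occupies our place: unlink and free it */
            if (parent)
                parent->next = rule->next;
            else
                *head = rule->next;
            free(rule);
        }

        if (parent) {
            node->next = parent->next;
            parent->next = node;
        } else {
            node->next = *head;
            *head = node;
        }
    }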
+static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+                                       u8 *flow_type)
+{
+    switch (fsp->flow_type & ~FLOW_EXT) {
     case TCP_V4_FLOW:
-        input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+        *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
         break;
     case UDP_V4_FLOW:
-        input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+        *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
         break;
     case SCTP_V4_FLOW:
-        input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+        *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
         break;
-    default:
-        return -1;
-    }
-
-    /* copy vlan tag minus the CFI bit */
-    if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
-        input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
-        if (!fs->vlan_tag_mask) {
-            input_masks.vlan_id_mask = htons(0xEFFF);
-        } else {
-            switch (~fs->vlan_tag_mask & 0xEFFF) {
-            /* all of these are valid vlan-mask values */
-            case 0xEFFF:
-            case 0xE000:
-            case 0x0FFF:
-            case 0x0000:
-                input_masks.vlan_id_mask = htons(~fs->vlan_tag_mask);
-                break;
-            /* exit with error if vlan-mask is invalid */
-            default:
-                e_err(drv, "Partial VLAN ID or "
-                      "priority mask in vlan-mask is not "
-                      "supported by hardware\n");
-                return -1;
-            }
-        }
-    }
-
-    /* make sure we only use the first 2 bytes of user data */
-    if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
-        input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
-        if (!(fs->data_mask & 0xFFFF)) {
-            input_masks.flex_mask = 0xFFFF;
-        } else if (~fs->data_mask & 0xFFFF) {
-            e_err(drv, "Partial user-def-mask is not "
-                  "supported by hardware\n");
-            return -1;
-        }
-    }
+    case IP_USER_FLOW:
+        switch (fsp->h_u.usr_ip4_spec.proto) {
+        case IPPROTO_TCP:
+            *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+            break;
+        case IPPROTO_UDP:
+            *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+            break;
+        case IPPROTO_SCTP:
+            *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+            break;
+        case 0:
+            if (!fsp->m_u.usr_ip4_spec.proto) {
+                *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+                break;
+            }
+        default:
+            return 0;
+        }
+        break;
+    default:
+        return 0;
+    }
+
+    return 1;
+}
+static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+                                        struct ethtool_rxnfc *cmd)
+{
+    struct ethtool_rx_flow_spec *fsp =
+            (struct ethtool_rx_flow_spec *)&cmd->fs;
+    struct ixgbe_hw *hw = &adapter->hw;
+    struct ixgbe_fdir_filter *input;
+    union ixgbe_atr_input mask;
+    int err;
+
+    if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+        return -EOPNOTSUPP;
+
     /*
-     * Copy input into formatted structures
-     *
-     * These assignments are based on the following logic
-     * If neither input or mask are set assume value is masked out.
-     * If input is set, but mask is not mask should default to accept all.
-     * If input is not set, but mask is set then mask likely results in 0.
-     * If input is set and mask is set then assign both.
+     * Don't allow programming if the action is a queue greater than
+     * the number of online Rx queues.
      */
-    if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
-        input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
-        if (!fs->m_u.tcp_ip4_spec.ip4src)
-            input_masks.src_ip_mask[0] = 0xFFFFFFFF;
-        else
-            input_masks.src_ip_mask[0] = ~fs->m_u.tcp_ip4_spec.ip4src;
-    }
-    if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
-        input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
-        if (!fs->m_u.tcp_ip4_spec.ip4dst)
-            input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
-        else
-            input_masks.dst_ip_mask[0] = ~fs->m_u.tcp_ip4_spec.ip4dst;
-    }
+    if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+        (fsp->ring_cookie >= adapter->num_rx_queues))
+        return -EINVAL;
+
+    /* Don't allow indexes to exist outside of available space */
+    if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
+        e_err(drv, "Location out of range\n");
+        return -EINVAL;
+    }
 
-    if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
-        input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
-        if (!fs->m_u.tcp_ip4_spec.psrc)
-            input_masks.src_port_mask = 0xFFFF;
-        else
-            input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
-    }
+    input = kzalloc(sizeof(*input), GFP_ATOMIC);
+    if (!input)
+        return -ENOMEM;
+
+    memset(&mask, 0, sizeof(union ixgbe_atr_input));
+
+    /* set SW index */
+    input->sw_idx = fsp->location;
+
+    /* record flow type */
+    if (!ixgbe_flowspec_to_flow_type(fsp,
+                                     &input->filter.formatted.flow_type)) {
+        e_err(drv, "Unrecognized flow type\n");
+        goto err_out;
+    }
 
-    if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
-        input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
-        if (!fs->m_u.tcp_ip4_spec.pdst)
-            input_masks.dst_port_mask = 0xFFFF;
-        else
-            input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
-    }
+    mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                               IXGBE_ATR_L4TYPE_MASK;
+
+    if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+        mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+    /* Copy input into formatted structures */
+    input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+    mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+    input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+    mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+    input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+    mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+    input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+    mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+    if (fsp->flow_type & FLOW_EXT) {
+        input->filter.formatted.vm_pool =
+                (unsigned char)ntohl(fsp->h_ext.data[1]);
+        mask.formatted.vm_pool =
+                (unsigned char)ntohl(fsp->m_ext.data[1]);
+        input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
+        mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
+        input->filter.formatted.flex_bytes = fsp->h_ext.vlan_etype;
+        mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+    }
+
     /* determine if we need to drop or route the packet */
-    if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
-        target_queue = MAX_RX_QUEUES - 1;
+    if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+        input->action = IXGBE_FDIR_DROP_QUEUE;
     else
-        target_queue = fs->action;
+        input->action = fsp->ring_cookie;
 
     spin_lock(&adapter->fdir_perfect_lock);
-    err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
-                                              &input_struct,
-                                              &input_masks, 0,
-                                              target_queue);
+
+    if (hlist_empty(&adapter->fdir_filter_list)) {
+        /* save mask and program input mask into HW */
+        memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+        err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+        if (err) {
+            e_err(drv, "Error writing mask\n");
+            goto err_out_w_lock;
+        }
+    } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+        e_err(drv, "Only one mask supported per port\n");
+        goto err_out_w_lock;
+    }
+
+    /* apply mask and compute/store hash */
+    ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+
+    /* program filters to filter memory */
+    err = ixgbe_fdir_write_perfect_filter_82599(hw,
+                &input->filter, input->sw_idx,
+                (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+                IXGBE_FDIR_DROP_QUEUE :
+                adapter->rx_ring[input->action]->reg_idx);
+    if (err)
+        goto err_out_w_lock;
+
+    ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
     spin_unlock(&adapter->fdir_perfect_lock);
 
-    return err ? -1 : 0;
+    return err;
+err_out_w_lock:
+    spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+    kfree(input);
+    return -EINVAL;
 }
+static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+                                        struct ethtool_rxnfc *cmd)
+{
+    struct ethtool_rx_flow_spec *fsp =
+            (struct ethtool_rx_flow_spec *)&cmd->fs;
+    int err;
+
+    spin_lock(&adapter->fdir_perfect_lock);
+    err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
+    spin_unlock(&adapter->fdir_perfect_lock);
+
+    return err;
+}
+static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+    struct ixgbe_adapter *adapter = netdev_priv(dev);
+    int ret = -EOPNOTSUPP;
+
+    switch (cmd->cmd) {
+    case ETHTOOL_SRXCLSRLINS:
+        ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
+        break;
+    case ETHTOOL_SRXCLSRLDEL:
+        ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
+        break;
+    default:
+        break;
+    }
+
+    return ret;
+}
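ixgbe_set_rxnfc is reached from userspace through the SIOCETHTOOL ioctl carrying an ethtool_rxnfc command; the ethtool utility's ntuple configuration option drives this same path. A minimal sketch of a direct caller inserting one TCP/IPv4 drop rule (field names are from the ethtool UAPI headers; "eth0" and the exact mask convention are assumptions, and error handling is trimmed):

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>      /* htons */
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;          /* handled by ixgbe_set_rxnfc */
        nfc.fs.flow_type = TCP_V4_FLOW;
        nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
        nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;  /* dst-port mask (driver-interpreted) */
        nfc.fs.ring_cookie = RX_CLS_FLOW_DISC;  /* drop matching packets */
        nfc.fs.location = 0;                    /* becomes the rule's sw_idx */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&nfc;

        ioctl(fd, SIOCETHTOOL, &ifr);           /* kernel invokes .set_rxnfc */
        close(fd);
        return 0;
    }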
 static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2506,7 +2763,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
     .set_coalesce   = ixgbe_set_coalesce,
     .get_flags      = ethtool_op_get_flags,
     .set_flags      = ixgbe_set_flags,
-    .set_rx_ntuple  = ixgbe_set_rx_ntuple,
+    .get_rxnfc      = ixgbe_get_rxnfc,
+    .set_rxnfc      = ixgbe_set_rxnfc,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
drivers/net/ixgbe/ixgbe_main.c
@@ -54,11 +54,10 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                   "Intel(R) 10 Gigabit PCI Express Network Driver";
 #define MAJ 3
-#define MIN 3
+#define MIN 4
 #define BUILD 8
-#define KFIX 2
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-    __stringify(BUILD) "-k" __stringify(KFIX)
+    __stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                   "Copyright (c) 1999-2011 Intel Corporation.";
@@ -1555,9 +1554,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
         q_vector->eitr = adapter->rx_eitr_param;
 
         ixgbe_write_eitr(q_vector);
-        /* If Flow Director is enabled, set interrupt affinity */
-        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+        /* If ATR is enabled, set interrupt affinity */
+        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
             /*
              * Allocate the affinity_hint cpumask, assign the mask
              * for this vector, and set our affinity_hint for
@@ -2468,8 +2466,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
     default:
         break;
     }
-    if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-        adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+    if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
         mask |= IXGBE_EIMS_FLOW_DIR;
 
     IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
@@ -3743,6 +3740,30 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
     hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
 }
 
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+    struct ixgbe_hw *hw = &adapter->hw;
+    struct hlist_node *node, *node2;
+    struct ixgbe_fdir_filter *filter;
+
+    spin_lock(&adapter->fdir_perfect_lock);
+
+    if (!hlist_empty(&adapter->fdir_filter_list))
+        ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+    hlist_for_each_entry_safe(filter, node, node2,
+                              &adapter->fdir_filter_list, fdir_node) {
+        ixgbe_fdir_write_perfect_filter_82599(hw,
+                &filter->filter,
+                filter->sw_idx,
+                (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+                IXGBE_FDIR_DROP_QUEUE :
+                adapter->rx_ring[filter->action]->reg_idx);
+    }
+
+    spin_unlock(&adapter->fdir_perfect_lock);
+}
+
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
@@ -3768,7 +3789,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                                 adapter->atr_sample_rate;
         ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
     } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
-        ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+        ixgbe_init_fdir_perfect_82599(&adapter->hw,
+                                      adapter->fdir_pballoc);
+        ixgbe_fdir_filter_restore(adapter);
     }
     ixgbe_configure_virtualization(adapter);
@@ -4145,6 +4168,23 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
         ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+    struct hlist_node *node, *node2;
+    struct ixgbe_fdir_filter *filter;
+
+    spin_lock(&adapter->fdir_perfect_lock);
+
+    hlist_for_each_entry_safe(filter, node, node2,
+                              &adapter->fdir_filter_list, fdir_node) {
+        hlist_del(&filter->fdir_node);
+        kfree(filter);
+    }
+    adapter->fdir_filter_count = 0;
+
+    spin_unlock(&adapter->fdir_perfect_lock);
+}
+
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
@@ -4334,15 +4374,13 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
     f_fdir->mask = 0;
 
     /* Flow Director must have RSS enabled */
-    if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-        ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-         (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+    if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+        (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
         adapter->num_tx_queues = f_fdir->indices;
         adapter->num_rx_queues = f_fdir->indices;
         ret = true;
     } else {
         adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-        adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
     }
     return ret;
 }
@@ -4372,12 +4410,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
     if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
         e_info(probe, "FCoE enabled with RSS\n");
-        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
             ixgbe_set_fdir_queues(adapter);
         else
             ixgbe_set_rss_queues(adapter);
     }
 
     /* adding FCoE rx rings to the end */
     f->mask = adapter->num_rx_queues;
     adapter->num_rx_queues += f->indices;
@@ -4670,9 +4708,8 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
     int i;
     bool ret = false;
 
-    if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-        ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-         (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+    if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+        (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
         for (i = 0; i < adapter->num_rx_queues; i++)
             adapter->rx_ring[i]->reg_idx = i;
         for (i = 0; i < adapter->num_tx_queues; i++)
@@ -4701,8 +4738,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
         return false;
 
     if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
             ixgbe_cache_ring_fdir(adapter);
         else
             ixgbe_cache_ring_rss(adapter);
@@ -4882,14 +4918,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
     adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
     adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-    if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
-                          IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+    if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
         e_err(probe,
-              "Flow Director is not supported while multiple "
+              "ATR is not supported while multiple "
               "queues are disabled.  Disabling Flow Director\n");
     }
     adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
     adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
     adapter->atr_sample_rate = 0;
     if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
         ixgbe_disable_sriov(adapter);
@@ -5140,7 +5174,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
         adapter->atr_sample_rate = 20;
         adapter->ring_feature[RING_F_FDIR].indices =
                                                  IXGBE_MAX_FDIR_INDICES;
-        adapter->fdir_pballoc = 0;
+        adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
         adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
         adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5537,6 +5571,8 @@ static int ixgbe_close(struct net_device *netdev)
     ixgbe_down(adapter);
     ixgbe_free_irq(adapter);
 
+    ixgbe_fdir_filter_exit(adapter);
+
     ixgbe_free_all_tx_resources(adapter);
     ixgbe_free_all_rx_resources(adapter);
@@ -7676,7 +7712,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
     /* Inform firmware of driver version */
     if (hw->mac.ops.set_fw_drv_ver)
-        hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, KFIX);
+        hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
+                                   FW_CEM_UNUSED_VER);
 
     /* add san mac addr to netdev */
     ixgbe_add_sanmac_netdev(netdev);
drivers/net/ixgbe/ixgbe_type.h
@@ -2056,9 +2056,10 @@ enum {
 #define IXGBE_VFLREC(_i)                 (0x00700 + (_i * 4))
 
 enum ixgbe_fdir_pballoc_type {
-    IXGBE_FDIR_PBALLOC_64K  = 0,
-    IXGBE_FDIR_PBALLOC_128K,
-    IXGBE_FDIR_PBALLOC_256K,
+    IXGBE_FDIR_PBALLOC_NONE = 0,
+    IXGBE_FDIR_PBALLOC_64K  = 1,
+    IXGBE_FDIR_PBALLOC_128K = 2,
+    IXGBE_FDIR_PBALLOC_256K = 3,
 };
 #define IXGBE_FDIR_PBALLOC_SIZE_SHIFT    16
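The re-numbered enum makes the PBALLOC encoding explicit (NONE=0, 64K=1, 128K=2, 256K=3), which is what lets the ethtool code in this commit report rule-table capacity as (1024 << pballoc) - 2: 2046 perfect filters at 64K, 4094 at 128K, 8190 at 256K. A one-line check of that arithmetic:

    #include <stdio.h>

    /* mirrors enum ixgbe_fdir_pballoc_type after this patch */
    enum fdir_pballoc { PBALLOC_NONE = 0, PBALLOC_64K, PBALLOC_128K, PBALLOC_256K };

    int main(void)
    {
        /* rule capacity reported by the ethtool code: (1024 << pballoc) - 2 */
        for (int pb = PBALLOC_64K; pb <= PBALLOC_256K; pb++)
            printf("pballoc=%d -> %d rules\n", pb, (1024 << pb) - 2);
        return 0;
    }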
@@ -2112,7 +2113,7 @@ enum ixgbe_fdir_pballoc_type {
 #define IXGBE_FDIRCMD_CMD_ADD_FLOW        0x00000001
 #define IXGBE_FDIRCMD_CMD_REMOVE_FLOW     0x00000002
 #define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT  0x00000003
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH  0x00000007
+#define IXGBE_FDIRCMD_FILTER_VALID        0x00000004
 #define IXGBE_FDIRCMD_FILTER_UPDATE       0x00000008
 #define IXGBE_FDIRCMD_IPv6DMATCH          0x00000010
 #define IXGBE_FDIRCMD_L4TYPE_UDP          0x00000020
@@ -2131,6 +2132,8 @@ enum ixgbe_fdir_pballoc_type {
 #define IXGBE_FDIR_INIT_DONE_POLL         10
 #define IXGBE_FDIRCMD_CMD_POLL            10
 
+#define IXGBE_FDIR_DROP_QUEUE             127
+
 /* Manageablility Host Interface defines */
 #define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH    1792 /* Num of bytes in range */
 #define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH   448  /* Num of dwords in range */
@@ -2140,7 +2143,8 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_CEM_HDR_LEN                    0x4
 #define FW_CEM_CMD_DRIVER_INFO            0xDD
 #define FW_CEM_CMD_DRIVER_INFO_LEN        0x5
-#define FW_CEM_CMD_RESERVED               0X0
+#define FW_CEM_CMD_RESERVED               0x0
+#define FW_CEM_UNUSED_VER                 0x0
 #define FW_CEM_MAX_RETRIES                3
 #define FW_CEM_RESP_STATUS_SUCCESS        0x1
@@ -2350,7 +2354,7 @@ union ixgbe_atr_input {
      *    src_port   - 2 bytes
      *    dst_port   - 2 bytes
      *    flex_bytes - 2 bytes
-     *    rsvd0      - 2 bytes - space reserved must be 0.
+     *    bkt_hash   - 2 bytes
      */
     struct {
         u8 vm_pool;
@@ -2361,7 +2365,7 @@ union ixgbe_atr_input {
         __be16 src_port;
         __be16 dst_port;
         __be16 flex_bytes;
-        __be16 rsvd0;
+        __be16 bkt_hash;
     } formatted;
     __be32 dword_stream[11];
 };
@@ -2382,16 +2386,6 @@ union ixgbe_atr_hash_dword {
     __be32 dword;
 };
 
-struct ixgbe_atr_input_masks {
-    __be16 rsvd0;
-    __be16 vlan_id_mask;
-    __be32 dst_ip_mask[4];
-    __be32 src_ip_mask[4];
-    __be16 src_port_mask;
-    __be16 dst_port_mask;
-    __be16 flex_mask;
-};
-
 enum ixgbe_eeprom_type {
     ixgbe_eeprom_uninitialized = 0,
     ixgbe_eeprom_spi,