Commit 1b981021
Authored May 25, 2005 by Linus Torvalds

Merge of 'for-linus' branch from
rsync://rsync.kernel.org/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

Parents: 384f1fcd f6f3a488

Showing 16 changed files with 3470 additions and 940 deletions (+3470, -940)
drivers/net/amd8111e.c              +14   -10
drivers/net/e100.c                 +139   -26
drivers/net/e1000/e1000.h           +33    -4
drivers/net/e1000/e1000_ethtool.c   +69   -36
drivers/net/e1000/e1000_hw.c      +1670  -463
drivers/net/e1000/e1000_hw.h       +547   -23
drivers/net/e1000/e1000_main.c     +891  -256
drivers/net/e1000/e1000_osdep.h     +30    -2
drivers/net/e1000/e1000_param.c      +1    -2
drivers/net/ixgb/ixgb.h              +1    -1
drivers/net/ixgb/ixgb_ee.c          +12   -12
drivers/net/ixgb/ixgb_ethtool.c      +3    -1
drivers/net/ixgb/ixgb_main.c        +53  -100
drivers/net/ixgb/ixgb_osdep.h        +1    -2
drivers/net/pcnet32.c                +5    -2
drivers/net/tulip/media.c            +1    -0
drivers/net/amd8111e.c

@@ -738,6 +738,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
     short vtag;
 #endif
     int rx_pkt_limit = dev->quota;
+    unsigned long flags;
     do{
         /* process receive packets until we use the quota*/
@@ -841,18 +842,19 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
     /* Receive descriptor is empty now */
     dev->quota -= num_rx_pkt;
     *budget -= num_rx_pkt;
+    spin_lock_irqsave(&lp->lock, flags);
     netif_rx_complete(dev);
     /* enable receive interrupt */
     writel(VAL0|RINTEN0, mmio + INTEN0);
     writel(VAL2 | RDMD0, mmio + CMD0);
+    spin_unlock_irqrestore(&lp->lock, flags);
     return 0;
 rx_not_empty:
     /* Do not call a netif_rx_complete */
     dev->quota -= num_rx_pkt;
     *budget -= num_rx_pkt;
     return 1;
 }
 #else
@@ -1261,18 +1263,20 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
     struct net_device *dev = (struct net_device *)dev_id;
     struct amd8111e_priv *lp = netdev_priv(dev);
     void __iomem *mmio = lp->mmio;
-    unsigned int intr0;
+    unsigned int intr0, intren0;
     unsigned int handled = 1;
-    if(dev == NULL)
+    if(unlikely(dev == NULL))
         return IRQ_NONE;
-    if (regs) spin_lock(&lp->lock);
+    spin_lock(&lp->lock);
     /* disabling interrupt */
     writel(INTREN, mmio + CMD0);
     /* Read interrupt status */
     intr0 = readl(mmio + INT0);
+    intren0 = readl(mmio + INTEN0);
     /* Process all the INT event until INTR bit is clear. */
@@ -1293,11 +1297,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
             /* Schedule a polling routine */
             __netif_rx_schedule(dev);
         }
-        else {
+        else if(intren0 & RINTEN0) {
             printk("************Driver bug! \
                 interrupt while in poll\n");
-            /* Fix by disabling interrupts */
-            writel(RINT0, mmio + INT0);
+            /* Fix by disable receive interrupts */
+            writel(RINTEN0, mmio + INTEN0);
         }
     }
 #else
@@ -1321,7 +1325,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
 err_no_interrupt:
     writel(VAL0|INTREN, mmio + CMD0);
-    if (regs) spin_unlock(&lp->lock);
+    spin_unlock(&lp->lock);
     return IRQ_RETVAL(handled);
 }
drivers/net/e100.c

@@ -155,9 +155,9 @@
 #define DRV_NAME        "e100"
 #define DRV_EXT         "-NAPI"
-#define DRV_VERSION     "3.3.6-k2"DRV_EXT
+#define DRV_VERSION     "3.4.8-k2"DRV_EXT
 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT   "Copyright(c) 1999-2004 Intel Corporation"
+#define DRV_COPYRIGHT   "Copyright(c) 1999-2005 Intel Corporation"
 #define PFX             DRV_NAME ": "
 #define E100_WATCHDOG_PERIOD    (2 * HZ)
@@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
     INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
     INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
     INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+    INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
+    INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
+    INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+    INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+    INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
     INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
     INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
     INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
     INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
     INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
+    INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
     { 0, }
 };
 MODULE_DEVICE_TABLE(pci, e100_id_table);
@@ -269,6 +275,12 @@ enum scb_status {
     rus_mask         = 0x3C,
 };
+enum ru_state {
+    RU_SUSPENDED = 0,
+    RU_RUNNING = 1,
+    RU_UNINITIALIZED = -1,
+};
 enum scb_stat_ack {
     stat_ack_not_ours    = 0x00,
     stat_ack_sw_gen      = 0x04,
@@ -510,7 +522,7 @@ struct nic {
     struct rx *rx_to_use;
     struct rx *rx_to_clean;
     struct rfd blank_rfd;
-    int ru_running;
+    enum ru_state ru_running;
     spinlock_t cb_lock ____cacheline_aligned;
     spinlock_t cmd_lock;
@@ -539,6 +551,7 @@ struct nic {
     struct timer_list watchdog;
     struct timer_list blink_timer;
     struct mii_if_info mii;
+    struct work_struct tx_timeout_task;
     enum loopback loopback;
     struct mem *mem;
@@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
     return 0;
 }
-#define E100_WAIT_SCB_TIMEOUT 40
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
     unsigned long flags;
@@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
          * because the controller is too busy, so
          * let's just queue the command and try again
          * when another command is scheduled. */
+        if(err == -ENOSPC) {
+            //request a reset
+            schedule_work(&nic->tx_timeout_task);
+        }
         break;
     } else {
         nic->cuc_cmd = cuc_resume;
@@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 static void e100_get_defaults(struct nic *nic)
 {
-    struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+    struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
     struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
     pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
     /* Quadwords to DMA into FIFO before starting frame transmit */
     nic->tx_threshold = 0xE0;
-    nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
-        ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+    /* no interrupt for every tx completion, delay = 256us if not 557*/
+    nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+        ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
     /* Template for a freshly allocated RFD */
     nic->blank_rfd.command = cpu_to_le16(cb_el);
@@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
     if(nic->flags & multicast_all)
         config->multicast_all = 0x1;        /* 1=accept, 0=no */
-    if(!(nic->flags & wol_magic))
+    /* disable WoL when up */
+    if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
         config->magic_packet_disable = 0x1; /* 1=off, 0=on */
     if(nic->mac >= mac_82558_D101_A4) {
@@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
         }
     }
-    e100_exec_cmd(nic, cuc_dump_reset, 0);
+    if(e100_exec_cmd(nic, cuc_dump_reset, 0))
+        DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
     struct sk_buff *skb)
 {
     cb->command = nic->tx_command;
+    /* interrupt every 16 packets regardless of delay */
+    if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
     cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
     cb->u.tcb.tcb_byte_count = 0;
     cb->u.tcb.threshold = nic->tx_threshold;
     cb->u.tcb.tbd_count = 1;
     cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
         skb->data, skb->len, PCI_DMA_TODEVICE));
+    // check for mapping failure?
     cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
@@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
            Issue a NOP command followed by a 1us delay before
            issuing the Tx command. */
-        e100_exec_cmd(nic, cuc_nop, 0);
+        if(e100_exec_cmd(nic, cuc_nop, 0))
+            DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
         udelay(1);
     }
@@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
     return 0;
 }
-static inline void e100_start_receiver(struct nic *nic)
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 {
+    if(!nic->rxs) return;
+    if(RU_SUSPENDED != nic->ru_running) return;
+    /* handle init time starts */
+    if(!rx) rx = nic->rxs;
     /* (Re)start RU if suspended or idle and RFA is non-NULL */
-    if(!nic->ru_running && nic->rx_to_clean->skb) {
-        e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
-        nic->ru_running = 1;
+    if(rx->skb) {
+        e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+        nic->ru_running = RU_RUNNING;
     }
 }
@@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
     rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
         RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+    if(pci_dma_mapping_error(rx->dma_addr)) {
+        dev_kfree_skb_any(rx->skb);
+        rx->skb = 0;
+        rx->dma_addr = 0;
+        return -ENOMEM;
+    }
     /* Link the RFD to end of RFA by linking previous RFD to
      * this one, and clearing EL bit of previous. */
     if(rx->prev->skb) {
@@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
     /* If data isn't ready, nothing to indicate */
     if(unlikely(!(rfd_status & cb_complete)))
-        return -EAGAIN;
+        return -ENODATA;
     /* Get actual data size */
     actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
     pci_unmap_single(nic->pdev, rx->dma_addr,
         RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+    /* this allows for a fast restart without re-enabling interrupts */
+    if(le16_to_cpu(rfd->command) & cb_el)
+        nic->ru_running = RU_SUSPENDED;
     /* Pull off the RFD and put the actual data (minus eth hdr) */
     skb_reserve(skb, sizeof(struct rfd));
     skb_put(skb, actual_size);
@@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
     unsigned int work_to_do)
 {
     struct rx *rx;
+    int restart_required = 0;
+    struct rx *rx_to_start = NULL;
+    /* are we already rnr? then pay attention!!! this ensures that
+     * the state machine progression never allows a start with a
+     * partially cleaned list, avoiding a race between hardware
+     * and rx_to_clean when in NAPI mode */
+    if(RU_SUSPENDED == nic->ru_running)
+        restart_required = 1;
     /* Indicate newly arrived packets */
     for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
-        if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+        int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+        if(-EAGAIN == err) {
+            /* hit quota so have more work to do, restart once
+             * cleanup is complete */
+            restart_required = 0;
+            break;
+        } else if(-ENODATA == err)
             break; /* No more to clean */
     }
+    /* save our starting point as the place we'll restart the receiver */
+    if(restart_required)
+        rx_to_start = nic->rx_to_clean;
     /* Alloc new skbs to refill list */
     for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
         if(unlikely(e100_rx_alloc_skb(nic, rx)))
             break; /* Better luck next time (see watchdog) */
     }
-    e100_start_receiver(nic);
+    if(restart_required) {
+        // ack the rnr?
+        writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+        e100_start_receiver(nic, rx_to_start);
+        if(work_done)
+            (*work_done)++;
+    }
 }
 static void e100_rx_clean_list(struct nic *nic)
@@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
     struct rx *rx;
     unsigned int i, count = nic->params.rfds.count;
+    nic->ru_running = RU_UNINITIALIZED;
     if(nic->rxs) {
         for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
             if(rx->skb) {
@@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
     }
     nic->rx_to_use = nic->rx_to_clean = NULL;
-    nic->ru_running = 0;
 }
 static int e100_rx_alloc_list(struct nic *nic)
@@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic)
     unsigned int i, count = nic->params.rfds.count;
     nic->rx_to_use = nic->rx_to_clean = NULL;
+    nic->ru_running = RU_UNINITIALIZED;
     if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
         return -ENOMEM;
@@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
     }
     nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+    nic->ru_running = RU_SUSPENDED;
     return 0;
 }
@@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
     /* We hit Receive No Resource (RNR); restart RU after cleaning */
     if(stat_ack & stat_ack_rnr)
-        nic->ru_running = 0;
+        nic->ru_running = RU_SUSPENDED;
     e100_disable_irq(nic);
     netif_rx_schedule(netdev);
@@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
     return 0;
 }
+#ifdef CONFIG_PM
 static int e100_asf(struct nic *nic)
 {
     /* ASF can be enabled from eeprom */
@@ -1671,6 +1742,7 @@ static int e100_asf(struct nic *nic)
        !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
        ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
 }
+#endif
 static int e100_up(struct nic *nic)
 {
@@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
     if((err = e100_hw_init(nic)))
         goto err_clean_cbs;
     e100_set_multicast_list(nic->netdev);
-    e100_start_receiver(nic);
+    e100_start_receiver(nic, 0);
     mod_timer(&nic->watchdog, jiffies);
     if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
         nic->netdev->name, nic->netdev)))
         goto err_no_irq;
-    e100_enable_irq(nic);
     netif_wake_queue(nic->netdev);
+    netif_poll_enable(nic->netdev);
+    /* enable ints _after_ enabling poll, preventing a race between
+     * disable ints+schedule */
+    e100_enable_irq(nic);
     return 0;
 err_no_irq:
@@ -1703,11 +1778,13 @@ static int e100_up(struct nic *nic)
 static void e100_down(struct nic *nic)
 {
+    /* wait here for poll to complete */
+    netif_poll_disable(nic->netdev);
+    netif_stop_queue(nic->netdev);
     e100_hw_reset(nic);
     free_irq(nic->pdev->irq, nic->netdev);
     del_timer_sync(&nic->watchdog);
     netif_carrier_off(nic->netdev);
-    netif_stop_queue(nic->netdev);
     e100_clean_cbs(nic);
     e100_rx_clean_list(nic);
 }
@@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
 {
     struct nic *nic = netdev_priv(netdev);
+    /* Reset outside of interrupt context, to avoid request_irq
+     * in interrupt context */
+    schedule_work(&nic->tx_timeout_task);
+}
+static void e100_tx_timeout_task(struct net_device *netdev)
+{
+    struct nic *nic = netdev_priv(netdev);
     DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
         readb(&nic->csr->scb.status));
     e100_down(netdev_priv(netdev));
@@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
         mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
             BMCR_LOOPBACK);
-    e100_start_receiver(nic);
+    e100_start_receiver(nic, 0);
     if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
         err = -ENOMEM;
@@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
     else
         nic->flags &= ~wol_magic;
-    pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
     e100_exec_cb(nic, NULL, e100_configure);
     return 0;
@@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
     e100_get_defaults(nic);
+    /* locks must be initialized before calling hw_reset */
     spin_lock_init(&nic->cb_lock);
     spin_lock_init(&nic->cmd_lock);
@@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
     nic->blink_timer.function = e100_blink_led;
     nic->blink_timer.data = (unsigned long)nic;
+    INIT_WORK(&nic->tx_timeout_task,
+        (void (*)(void *))e100_tx_timeout_task, netdev);
     if((err = e100_alloc(nic))) {
         DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
         goto err_out_iounmap;
@@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
        (nic->eeprom[eeprom_id] & eeprom_id_wol))
         nic->flags |= wol_magic;
-    pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+    /* ack any pending wake events, disable PME */
+    pci_enable_wake(pdev, 0, 0);
     strcpy(netdev->name, "eth%d");
     if((err = register_netdev(netdev))) {
@@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
     pci_set_power_state(pdev, PCI_D0);
     pci_restore_state(pdev);
-    e100_hw_init(nic);
+    /* ack any pending wake events, disable PME */
+    pci_enable_wake(pdev, 0, 0);
+    if(e100_hw_init(nic))
+        DPRINTK(HW, ERR, "e100_hw_init failed\n");
     netif_device_attach(netdev);
     if(netif_running(netdev))
@@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
 }
 #endif
+static void e100_shutdown(struct device *dev)
+{
+    struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+    struct net_device *netdev = pci_get_drvdata(pdev);
+    struct nic *nic = netdev_priv(netdev);
+#ifdef CONFIG_PM
+    pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+#else
+    pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+#endif
+}
 static struct pci_driver e100_driver = {
     .name =         DRV_NAME,
     .id_table =     e100_id_table,
@@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
     .suspend =      e100_suspend,
     .resume =       e100_resume,
 #endif
+    .driver = {
+        .shutdown = e100_shutdown,
+    }
 };
 static int __init e100_init_module(void)
drivers/net/e1000/e1000.h

 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -112,6 +112,8 @@ struct e1000_adapter;
 #define E1000_MAX_82544_RXD 4096
 /* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
 #define E1000_RXBUFFER_2048  2048
 #define E1000_RXBUFFER_4096  4096
 #define E1000_RXBUFFER_8192  8192
@@ -138,7 +140,7 @@ struct e1000_adapter;
 #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
 #define AUTO_ALL_MODES       0
-#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_82544_APM 0x0400
 #define E1000_EEPROM_APME    0x0400
 #ifndef E1000_MASTER_SLAVE
@@ -146,6 +148,10 @@ struct e1000_adapter;
 #define E1000_MASTER_SLAVE e1000_ms_hw_default
 #endif
 #define E1000_MNG_VLAN_NONE -1
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
 /* only works for sizes that are powers of 2 */
 #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
@@ -159,6 +165,9 @@ struct e1000_buffer {
     uint16_t next_to_watch;
 };
+struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
 struct e1000_desc_ring {
     /* pointer to the descriptor ring memory */
     void *desc;
@@ -174,12 +183,19 @@ struct e1000_desc_ring {
     unsigned int next_to_clean;
     /* array of buffer information structs */
     struct e1000_buffer *buffer_info;
+    /* arrays of page information for packet split */
+    struct e1000_ps_page *ps_page;
+    struct e1000_ps_page_dma *ps_page_dma;
 };
 #define E1000_DESC_UNUSED(R) \
     ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
     (R)->next_to_clean - (R)->next_to_use - 1)
+#define E1000_RX_DESC_PS(R, i) \
+    (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i) \
+    (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
 #define E1000_RX_DESC(R, i)        E1000_GET_DESC(R, i, e1000_rx_desc)
 #define E1000_TX_DESC(R, i)        E1000_GET_DESC(R, i, e1000_tx_desc)
@@ -192,6 +208,7 @@ struct e1000_adapter {
     struct timer_list watchdog_timer;
     struct timer_list phy_info_timer;
     struct vlan_group *vlgrp;
+    uint16_t mng_vlan_id;
     uint32_t bd_number;
     uint32_t rx_buffer_len;
     uint32_t part_num;
@@ -228,14 +245,23 @@ struct e1000_adapter {
     boolean_t detect_tx_hung;
     /* RX */
+#ifdef CONFIG_E1000_NAPI
+    boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+                           int *work_done, int work_to_do);
+#else
+    boolean_t (*clean_rx) (struct e1000_adapter *adapter);
+#endif
+    void (*alloc_rx_buf) (struct e1000_adapter *adapter);
     struct e1000_desc_ring rx_ring;
     uint64_t hw_csum_err;
     uint64_t hw_csum_good;
     uint32_t rx_int_delay;
     uint32_t rx_abs_int_delay;
     boolean_t rx_csum;
+    boolean_t rx_ps;
     uint32_t gorcl;
     uint64_t gorcl_old;
+    uint16_t rx_ps_bsize0;
     /* Interrupt Throttle Rate */
     uint32_t itr;
@@ -257,5 +283,8 @@ struct e1000_adapter {
     int msg_enable;
+#ifdef CONFIG_PCI_MSI
+    boolean_t have_msi;
+#endif
 };
 #endif /* _E1000_H_ */
drivers/net/e1000/e1000_ethtool.c

 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
     { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
     { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
     { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
+    { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
     { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
     { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
     { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
@@ -593,7 +594,7 @@ e1000_set_ringparam(struct net_device *netdev,
     tx_old = adapter->tx_ring;
     rx_old = adapter->rx_ring;
-    if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 
+    if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
         return -EINVAL;
     if(netif_running(adapter->netdev))
@@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
          * test failed.
          */
         adapter->test_icr = 0;
-        E1000_WRITE_REG(&adapter->hw, IMC, (~mask & 0x00007FFF));
-        E1000_WRITE_REG(&adapter->hw, ICS, (~mask & 0x00007FFF));
+        E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
+        E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
         msec_delay(10);
         if(adapter->test_icr) {
@@ -919,7 +918,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
     /* Setup Tx descriptor ring and Tx buffers */
-    txdr->count = 80;
+    if(!txdr->count)
+        txdr->count = E1000_DEFAULT_TXD;
     size = txdr->count * sizeof(struct e1000_buffer);
     if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -974,7 +974,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
     /* Setup Rx descriptor ring and Rx buffers */
-    rxdr->count = 80;
+    if(!rxdr->count)
+        rxdr->count = E1000_DEFAULT_RXD;
     size = rxdr->count * sizeof(struct e1000_buffer);
     if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -1310,31 +1311,62 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
     struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
     struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
     struct pci_dev *pdev = adapter->pdev;
-    int i, ret_val;
+    int i, j, k, l, lc, good_cnt, ret_val = 0;
+    unsigned long time;
     E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
-    for(i = 0; i < 64; i++) {
-        e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
-        pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
-                txdr->buffer_info[i].length,
+    /* Calculate the loop count based on the largest descriptor ring
+     * The idea is to wrap the largest ring a number of times using 64
+     * send/receive pairs during each loop
+     */
+    if(rxdr->count <= txdr->count)
+        lc = ((txdr->count / 64) * 2) + 1;
+    else
+        lc = ((rxdr->count / 64) * 2) + 1;
+    k = l = 0;
+    for(j = 0; j <= lc; j++) { /* loop count loop */
+        for(i = 0; i < 64; i++) { /* send the packets */
+            e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
+            pci_dma_sync_single_for_device(pdev,
+                    txdr->buffer_info[k].dma,
+                    txdr->buffer_info[k].length,
                 PCI_DMA_TODEVICE);
+            if(unlikely(++k == txdr->count)) k = 0;
         }
-    E1000_WRITE_REG(&adapter->hw, TDT, i);
+        E1000_WRITE_REG(&adapter->hw, TDT, k);
     msec_delay(200);
-    i = 0;
-    do {
-        pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
-                rxdr->buffer_info[i].length,
+        time = jiffies; /* set the start time for the receive */
+        good_cnt = 0;
+        do { /* receive the sent packets */
+            pci_dma_sync_single_for_cpu(pdev,
+                    rxdr->buffer_info[l].dma,
+                    rxdr->buffer_info[l].length,
                 PCI_DMA_FROMDEVICE);
-        ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
+            ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[l].skb,
                 1024);
-        i++;
-    } while (ret_val != 0 && i < 64);
+            if(!ret_val)
+                good_cnt++;
+            if(unlikely(++l == rxdr->count)) l = 0;
+            /* time + 20 msecs (200 msecs on 2.4) is more than
+             * enough time to complete the receives, if it's
+             * exceeded, break and error off
+             */
+        } while (good_cnt < 64 && jiffies < (time + 20));
+        if(good_cnt != 64) {
+            ret_val = 13; /* ret_val is the same as mis-compare */
+            break;
+        }
+        if(jiffies >= (time + 2)) {
+            ret_val = 14; /* error code for time out error */
+            break;
+        }
+    } /* end loop count loop */
     return ret_val;
 }
@@ -1354,13 +1386,12 @@ static int
 e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 {
     *data = 0;
     if(adapter->hw.media_type == e1000_media_type_internal_serdes) {
         int i = 0;
         adapter->hw.serdes_link_down = TRUE;
-        /* on some blade server designs link establishment */
-        /* could take as long as 2-3 minutes.               */
+        /* On some blade server designs, link establishment
+         * could take as long as 2-3 minutes */
         do {
             e1000_check_for_link(&adapter->hw);
             if(adapter->hw.serdes_link_down == FALSE)
@@ -1371,6 +1402,8 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
             *data = 1;
     } else {
         e1000_check_for_link(&adapter->hw);
+        if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
+            msec_delay(4000);
         if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
             *data = 1;
drivers/net/e1000/e1000_hw.c
View file @
1b981021
/*******************************************************************************
Copyright(c) 1999 - 200
4
Intel Corporation. All rights reserved.
Copyright(c) 1999 - 200
5
Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
...
...
@@ -63,10 +63,11 @@ static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
static
int32_t
e1000_acquire_eeprom
(
struct
e1000_hw
*
hw
);
static
void
e1000_release_eeprom
(
struct
e1000_hw
*
hw
);
static
void
e1000_standby_eeprom
(
struct
e1000_hw
*
hw
);
static
int32_t
e1000_id_led_init
(
struct
e1000_hw
*
hw
);
static
int32_t
e1000_set_vco_speed
(
struct
e1000_hw
*
hw
);
static
int32_t
e1000_polarity_reversal_workaround
(
struct
e1000_hw
*
hw
);
static
int32_t
e1000_set_phy_mode
(
struct
e1000_hw
*
hw
);
static
int32_t
e1000_host_if_read_cookie
(
struct
e1000_hw
*
hw
,
uint8_t
*
buffer
);
static
uint8_t
e1000_calculate_mng_checksum
(
char
*
buffer
,
uint32_t
length
);
/* IGP cable length table */
static
const
...
...
@@ -80,6 +81,17 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
100
,
100
,
100
,
100
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
110
,
120
,
120
,
120
,
120
,
120
,
120
,
120
,
120
,
120
,
120
};
static
const
uint16_t
e1000_igp_2_cable_length_table
[
IGP02E1000_AGC_LENGTH_TABLE_SIZE
]
=
{
8
,
13
,
17
,
19
,
21
,
23
,
25
,
27
,
29
,
31
,
33
,
35
,
37
,
39
,
41
,
43
,
22
,
24
,
27
,
30
,
32
,
35
,
37
,
40
,
42
,
44
,
47
,
49
,
51
,
54
,
56
,
58
,
32
,
35
,
38
,
41
,
44
,
47
,
50
,
53
,
55
,
58
,
61
,
63
,
66
,
69
,
71
,
74
,
43
,
47
,
51
,
54
,
58
,
61
,
64
,
67
,
71
,
74
,
77
,
80
,
82
,
85
,
88
,
90
,
57
,
62
,
66
,
70
,
74
,
77
,
81
,
85
,
88
,
91
,
94
,
97
,
100
,
103
,
106
,
108
,
73
,
78
,
82
,
87
,
91
,
95
,
98
,
102
,
105
,
109
,
112
,
114
,
117
,
119
,
122
,
124
,
91
,
96
,
101
,
105
,
109
,
113
,
116
,
119
,
122
,
125
,
127
,
128
,
128
,
128
,
128
,
128
,
108
,
113
,
117
,
121
,
124
,
127
,
128
,
128
,
128
,
128
,
128
,
128
,
128
,
128
,
128
,
128
};
/******************************************************************************
* Set the phy type member in the hw struct.
...
...
@@ -91,10 +103,14 @@ e1000_set_phy_type(struct e1000_hw *hw)
{
DEBUGFUNC
(
"e1000_set_phy_type"
);
if
(
hw
->
mac_type
==
e1000_undefined
)
return
-
E1000_ERR_PHY_TYPE
;
switch
(
hw
->
phy_id
)
{
case
M88E1000_E_PHY_ID
:
case
M88E1000_I_PHY_ID
:
case
M88E1011_I_PHY_ID
:
case
M88E1111_I_PHY_ID
:
hw
->
phy_type
=
e1000_phy_m88
;
break
;
case
IGP01E1000_I_PHY_ID
:
...
...
@@ -128,7 +144,6 @@ e1000_phy_init_script(struct e1000_hw *hw)
DEBUGFUNC
(
"e1000_phy_init_script"
);
if
(
hw
->
phy_init_script
)
{
msec_delay
(
20
);
...
...
@@ -271,6 +286,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case
E1000_DEV_ID_82546GB_FIBER
:
case
E1000_DEV_ID_82546GB_SERDES
:
case
E1000_DEV_ID_82546GB_PCIE
:
case
E1000_DEV_ID_82546GB_QUAD_COPPER
:
hw
->
mac_type
=
e1000_82546_rev_3
;
break
;
case
E1000_DEV_ID_82541EI
:
...
...
@@ -289,12 +305,19 @@ e1000_set_mac_type(struct e1000_hw *hw)
case
E1000_DEV_ID_82547GI
:
hw
->
mac_type
=
e1000_82547_rev_2
;
break
;
case
E1000_DEV_ID_82573E
:
case
E1000_DEV_ID_82573E_IAMT
:
hw
->
mac_type
=
e1000_82573
;
break
;
default:
/* Should never have loaded on this device */
return
-
E1000_ERR_MAC_TYPE
;
}
switch
(
hw
->
mac_type
)
{
case
e1000_82573
:
hw
->
eeprom_semaphore_present
=
TRUE
;
/* fall through */
case
e1000_82541
:
case
e1000_82547
:
case
e1000_82541_rev_2
:
...
...
@@ -360,6 +383,9 @@ e1000_reset_hw(struct e1000_hw *hw)
uint32_t
icr
;
uint32_t
manc
;
uint32_t
led_ctrl
;
uint32_t
timeout
;
uint32_t
extcnf_ctrl
;
int32_t
ret_val
;
DEBUGFUNC
(
"e1000_reset_hw"
);
...
...
@@ -369,6 +395,15 @@ e1000_reset_hw(struct e1000_hw *hw)
e1000_pci_clear_mwi
(
hw
);
}
if
(
hw
->
bus_type
==
e1000_bus_type_pci_express
)
{
/* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
if
(
e1000_disable_pciex_master
(
hw
)
!=
E1000_SUCCESS
)
{
DEBUGOUT
(
"PCI-E Master disable polling has failed.
\n
"
);
}
}
/* Clear interrupt mask to stop board from generating interrupts */
DEBUGOUT
(
"Masking off all interrupts
\n
"
);
E1000_WRITE_REG
(
hw
,
IMC
,
0xffffffff
);
...
...
@@ -393,10 +428,32 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Must reset the PHY before resetting the MAC */
if
((
hw
->
mac_type
==
e1000_82541
)
||
(
hw
->
mac_type
==
e1000_82547
))
{
E1000_WRITE_REG
_IO
(
hw
,
CTRL
,
(
ctrl
|
E1000_CTRL_PHY_RST
));
E1000_WRITE_REG
(
hw
,
CTRL
,
(
ctrl
|
E1000_CTRL_PHY_RST
));
msec_delay
(
5
);
}
/* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset. */
if
(
hw
->
mac_type
==
e1000_82573
)
{
timeout
=
10
;
extcnf_ctrl
=
E1000_READ_REG
(
hw
,
EXTCNF_CTRL
);
extcnf_ctrl
|=
E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP
;
do
{
E1000_WRITE_REG
(
hw
,
EXTCNF_CTRL
,
extcnf_ctrl
);
extcnf_ctrl
=
E1000_READ_REG
(
hw
,
EXTCNF_CTRL
);
if
(
extcnf_ctrl
&
E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP
)
break
;
else
extcnf_ctrl
|=
E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP
;
msec_delay
(
2
);
timeout
--
;
}
while
(
timeout
);
}
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not effect
* the current PCI configuration. The global reset bit is self-
...
...
@@ -450,6 +507,18 @@ e1000_reset_hw(struct e1000_hw *hw)
/* Wait for EEPROM reload */
msec_delay
(
20
);
break
;
case
e1000_82573
:
udelay
(
10
);
ctrl_ext
=
E1000_READ_REG
(
hw
,
CTRL_EXT
);
ctrl_ext
|=
E1000_CTRL_EXT_EE_RST
;
E1000_WRITE_REG
(
hw
,
CTRL_EXT
,
ctrl_ext
);
E1000_WRITE_FLUSH
(
hw
);
/* fall through */
ret_val
=
e1000_get_auto_rd_done
(
hw
);
if
(
ret_val
)
/* We don't want to continue accessing MAC registers. */
return
ret_val
;
break
;
default:
/* Wait for EEPROM reload (it happens automatically) */
msec_delay
(
5
);
...
...
@@ -457,7 +526,7 @@ e1000_reset_hw(struct e1000_hw *hw)
}
/* Disable HW ARPs on ASF enabled adapters */
if
(
hw
->
mac_type
>=
e1000_82540
)
{
if
(
hw
->
mac_type
>=
e1000_82540
&&
hw
->
mac_type
<=
e1000_82547_rev_2
)
{
manc
=
E1000_READ_REG
(
hw
,
MANC
);
manc
&=
~
(
E1000_MANC_ARP_EN
);
E1000_WRITE_REG
(
hw
,
MANC
,
manc
);
...
...
@@ -510,6 +579,8 @@ e1000_init_hw(struct e1000_hw *hw)
uint16_t
pcix_stat_hi_word
;
uint16_t
cmd_mmrbc
;
uint16_t
stat_mmrbc
;
uint32_t
mta_size
;
DEBUGFUNC
(
"e1000_init_hw"
);
/* Initialize Identification LED */
...
...
@@ -524,8 +595,8 @@ e1000_init_hw(struct e1000_hw *hw)
/* Disabling VLAN filtering. */
DEBUGOUT
(
"Initializing the IEEE VLAN
\n
"
);
if
(
hw
->
mac_type
<
e1000_82545_rev_3
)
E1000_WRITE_REG
(
hw
,
VET
,
0
);
e1000_clear_vfta
(
hw
);
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
...
...
@@ -553,14 +624,16 @@ e1000_init_hw(struct e1000_hw *hw)
/* Zero out the Multicast HASH table */
DEBUGOUT
(
"Zeroing the MTA
\n
"
);
for
(
i
=
0
;
i
<
E1000_MC_TBL_SIZE
;
i
++
)
mta_size
=
E1000_MC_TBL_SIZE
;
for
(
i
=
0
;
i
<
mta_size
;
i
++
)
E1000_WRITE_REG_ARRAY
(
hw
,
MTA
,
i
,
0
);
/* Set the PCI priority bit correctly in the CTRL register. This
* determines if the adapter gives priority to receives, or if it
* gives equal priority to transmits and receives.
* gives equal priority to transmits and receives. Valid only on
* 82542 and 82543 silicon.
*/
if
(
hw
->
dma_fairness
)
{
if
(
hw
->
dma_fairness
&&
hw
->
mac_type
<=
e1000_82543
)
{
ctrl
=
E1000_READ_REG
(
hw
,
CTRL
);
E1000_WRITE_REG
(
hw
,
CTRL
,
ctrl
|
E1000_CTRL_PRIOR
);
}
...
...
@@ -598,9 +671,21 @@ e1000_init_hw(struct e1000_hw *hw)
if
(
hw
->
mac_type
>
e1000_82544
)
{
ctrl
=
E1000_READ_REG
(
hw
,
TXDCTL
);
ctrl
=
(
ctrl
&
~
E1000_TXDCTL_WTHRESH
)
|
E1000_TXDCTL_FULL_TX_DESC_WB
;
switch
(
hw
->
mac_type
)
{
default:
break
;
case
e1000_82573
:
ctrl
|=
E1000_TXDCTL_COUNT_DESC
;
break
;
}
E1000_WRITE_REG
(
hw
,
TXDCTL
,
ctrl
);
}
if
(
hw
->
mac_type
==
e1000_82573
)
{
e1000_enable_tx_pkt_filtering
(
hw
);
}
/* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
...
...
@@ -679,7 +764,7 @@ e1000_setup_link(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
if
(
e1000_read_eeprom
(
hw
,
EEPROM_INIT_CONTROL2_REG
,
1
,
&
eeprom_data
)
<
0
)
{
if
(
e1000_read_eeprom
(
hw
,
EEPROM_INIT_CONTROL2_REG
,
1
,
&
eeprom_data
))
{
DEBUGOUT
(
"EEPROM Read Error
\n
"
);
return
-
E1000_ERR_EEPROM
;
}
...
...
@@ -736,6 +821,7 @@ e1000_setup_link(struct e1000_hw *hw)
E1000_WRITE_REG
(
hw
,
FCAL
,
FLOW_CONTROL_ADDRESS_LOW
);
E1000_WRITE_REG
(
hw
,
FCAH
,
FLOW_CONTROL_ADDRESS_HIGH
);
E1000_WRITE_REG
(
hw
,
FCT
,
FLOW_CONTROL_TYPE
);
E1000_WRITE_REG
(
hw
,
FCTTV
,
hw
->
fc_pause_time
);
/* Set the flow control receive threshold registers. Normally,
...
...
@@ -906,20 +992,18 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
}
/******************************************************************************
*
Detects which PHY is present and the speed and duplex
*
Make sure we have a valid PHY and change PHY mode before link setup.
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
static
int32_t
e1000_
setup_copper_link
(
struct
e1000_hw
*
hw
)
e1000_
copper_link_preconfig
(
struct
e1000_hw
*
hw
)
{
uint32_t
ctrl
;
uint32_t
led_ctrl
;
int32_t
ret_val
;
uint16_t
i
;
uint16_t
phy_data
;
DEBUGFUNC
(
"e1000_
setup_copper_link
"
);
DEBUGFUNC
(
"e1000_
copper_link_preconfig
"
);
ctrl
=
E1000_READ_REG
(
hw
,
CTRL
);
/* With 82543, we need to force speed and duplex on the MAC equal to what
...
...
@@ -933,7 +1017,9 @@ e1000_setup_copper_link(struct e1000_hw *hw)
}
else
{
ctrl
|=
(
E1000_CTRL_FRCSPD
|
E1000_CTRL_FRCDPX
|
E1000_CTRL_SLU
);
E1000_WRITE_REG
(
hw
,
CTRL
,
ctrl
);
e1000_phy_hw_reset
(
hw
);
ret_val
=
e1000_phy_hw_reset
(
hw
);
if
(
ret_val
)
return
ret_val
;
}
/* Make sure we have a valid PHY */
...
...
@@ -961,11 +1047,29 @@ e1000_setup_copper_link(struct e1000_hw *hw)
hw
->
mac_type
==
e1000_82541_rev_2
||
hw
->
mac_type
==
e1000_82547_rev_2
)
hw
->
phy_reset_disable
=
FALSE
;
if
(
!
hw
->
phy_reset_disable
)
{
if
(
hw
->
phy_type
==
e1000_phy_igp
)
{
return
E1000_SUCCESS
;
}
/********************************************************************
* Copper link setup for e1000_phy_igp series.
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
static
int32_t
e1000_copper_link_igp_setup
(
struct
e1000_hw
*
hw
)
{
uint32_t
led_ctrl
;
int32_t
ret_val
;
uint16_t
phy_data
;
DEBUGFUNC
(
"e1000_copper_link_igp_setup"
);
if
(
hw
->
phy_reset_disable
)
return
E1000_SUCCESS
;
ret_val
=
e1000_phy_reset
(
hw
);
if
(
ret_val
)
{
if
(
ret_val
)
{
DEBUGOUT
(
"Error Resetting the PHY
\n
"
);
return
ret_val
;
}
...
...
@@ -981,22 +1085,26 @@ e1000_setup_copper_link(struct e1000_hw *hw)
/* disable lplu d3 during driver init */
ret_val
=
e1000_set_d3_lplu_state
(
hw
,
FALSE
);
if
(
ret_val
)
{
if
(
ret_val
)
{
DEBUGOUT
(
"Error Disabling LPLU D3
\n
"
);
return
ret_val
;
}
/* disable lplu d0 during driver init */
ret_val
=
e1000_set_d0_lplu_state
(
hw
,
FALSE
);
if
(
ret_val
)
{
DEBUGOUT
(
"Error Disabling LPLU D0
\n
"
);
return
ret_val
;
}
/* Configure mdi-mdix settings */
ret_val
=
e1000_read_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_CTRL
,
&
phy_data
);
if
(
ret_val
)
ret_val
=
e1000_read_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_CTRL
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
if
((
hw
->
mac_type
==
e1000_82541
)
||
(
hw
->
mac_type
==
e1000_82547
))
{
if
((
hw
->
mac_type
==
e1000_82541
)
||
(
hw
->
mac_type
==
e1000_82547
))
{
hw
->
dsp_config_state
=
e1000_dsp_config_disabled
;
/* Force MDI for earlier revs of the IGP PHY */
phy_data
&=
~
(
IGP01E1000_PSCR_AUTO_MDIX
|
IGP01E1000_PSCR_FORCE_MDI_MDIX
);
phy_data
&=
~
(
IGP01E1000_PSCR_AUTO_MDIX
|
IGP01E1000_PSCR_FORCE_MDI_MDIX
);
hw
->
mdix
=
1
;
}
else
{
...
...
@@ -1016,8 +1124,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
break
;
}
}
ret_val
=
e1000_write_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_CTRL
,
phy_data
);
ret_val
=
e1000_write_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_CTRL
,
phy_data
);
if
(
ret_val
)
return
ret_val
;
...
...
@@ -1036,8 +1143,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
* resolution as hardware default. */
if
(
hw
->
autoneg_advertised
==
ADVERTISE_1000_FULL
)
{
/* Disable SmartSpeed */
ret_val
=
e1000_read_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_CONFIG
,
&
phy_data
);
ret_val
=
e1000_read_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_CONFIG
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
phy_data
&=
~
IGP01E1000_PSCFR_SMART_SPEED
;
...
...
@@ -1084,10 +1190,29 @@ e1000_setup_copper_link(struct e1000_hw *hw)
if
(
ret_val
)
return
ret_val
;
}
}
else
{
return
E1000_SUCCESS
;
}
/********************************************************************
* Copper link setup for e1000_phy_m88 series.
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
static
int32_t
e1000_copper_link_mgp_setup
(
struct
e1000_hw
*
hw
)
{
int32_t
ret_val
;
uint16_t
phy_data
;
DEBUGFUNC
(
"e1000_copper_link_mgp_setup"
);
if
(
hw
->
phy_reset_disable
)
return
E1000_SUCCESS
;
/* Enable CRS on TX. This must be set for half-duplex operation. */
ret_val
=
e1000_read_phy_reg
(
hw
,
M88E1000_PHY_SPEC_CTRL
,
&
phy_data
);
ret_val
=
e1000_read_phy_reg
(
hw
,
M88E1000_PHY_SPEC_CTRL
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
...
...
@@ -1127,16 +1252,14 @@ e1000_setup_copper_link(struct e1000_hw *hw)
phy_data
&=
~
M88E1000_PSCR_POLARITY_REVERSAL
;
if
(
hw
->
disable_polarity_correction
==
1
)
phy_data
|=
M88E1000_PSCR_POLARITY_REVERSAL
;
ret_val
=
e1000_write_phy_reg
(
hw
,
M88E1000_PHY_SPEC_CTRL
,
phy_data
);
ret_val
=
e1000_write_phy_reg
(
hw
,
M88E1000_PHY_SPEC_CTRL
,
phy_data
);
if
(
ret_val
)
return
ret_val
;
/* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
ret_val
=
e1000_read_phy_reg
(
hw
,
M88E1000_EXT_PHY_SPEC_CTRL
,
&
phy_data
);
ret_val
=
e1000_read_phy_reg
(
hw
,
M88E1000_EXT_PHY_SPEC_CTRL
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
...
...
@@ -1148,8 +1271,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK
);
phy_data
|=
(
M88E1000_EPSCR_MASTER_DOWNSHIFT_1X
|
M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X
);
ret_val
=
e1000_write_phy_reg
(
hw
,
M88E1000_EXT_PHY_SPEC_CTRL
,
phy_data
);
ret_val
=
e1000_write_phy_reg
(
hw
,
M88E1000_EXT_PHY_SPEC_CTRL
,
phy_data
);
if
(
ret_val
)
return
ret_val
;
}
...
...
@@ -1160,24 +1282,24 @@ e1000_setup_copper_link(struct e1000_hw *hw)
DEBUGOUT
(
"Error Resetting the PHY
\n
"
);
return
ret_val
;
}
}
/* Options:
* autoneg = 1 (default)
* PHY will advertise value(s) parsed from
* autoneg_advertised and fc
* autoneg = 0
* PHY will be set to 10H, 10F, 100H, or 100F
* depending on value parsed from forced_speed_duplex.
*/
return
E1000_SUCCESS
;
}
/********************************************************************
* Setup auto-negotiation and flow control advertisements,
* and then perform auto-negotiation.
*
* hw - Struct containing variables accessed by shared code
*********************************************************************/
static
int32_t
e1000_copper_link_autoneg
(
struct
e1000_hw
*
hw
)
{
int32_t
ret_val
;
uint16_t
phy_data
;
DEBUGFUNC
(
"e1000_copper_link_autoneg"
);
/* Is autoneg enabled? This is enabled by default or by software
* override. If so, call e1000_phy_setup_autoneg routine to parse the
* autoneg_advertised and fc options. If autoneg is NOT enabled, then
* the user should have provided a speed/duplex override. If so, then
* call e1000_phy_force_speed_duplex to parse and set this up.
*/
if
(
hw
->
autoneg
)
{
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
*/
...
...
@@ -1219,37 +1341,31 @@ e1000_setup_copper_link(struct e1000_hw *hw)
return
ret_val
;
}
}
hw
->
get_link_status
=
TRUE
;
}
else
{
DEBUGOUT
(
"Forcing speed and duplex
\n
"
);
ret_val
=
e1000_phy_force_speed_duplex
(
hw
);
if
(
ret_val
)
{
DEBUGOUT
(
"Error Forcing Speed and Duplex
\n
"
);
return
ret_val
;
}
}
}
/* !hw->phy_reset_disable */
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
for
(
i
=
0
;
i
<
10
;
i
++
)
{
ret_val
=
e1000_read_phy_reg
(
hw
,
PHY_STATUS
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
ret_val
=
e1000_read_phy_reg
(
hw
,
PHY_STATUS
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
return
E1000_SUCCESS
;
}
/******************************************************************************
* Config the MAC and the PHY after link is up.
* 1) Set up the MAC to the current PHY speed/duplex
* if we are on 82543. If we
* are on newer silicon, we only need to configure
* collision distance in the Transmit Control Register.
* 2) Set up flow control on the MAC to that established with
* the link partner.
* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
static
int32_t
e1000_copper_link_postconfig
(
struct
e1000_hw
*
hw
)
{
int32_t
ret_val
;
DEBUGFUNC
(
"e1000_copper_link_postconfig"
);
if
(
phy_data
&
MII_SR_LINK_STATUS
)
{
/* We have link, so we need to finish the config process:
* 1) Set up the MAC to the current PHY speed/duplex
* if we are on 82543. If we
* are on newer silicon, we only need to configure
* collision distance in the Transmit Control Register.
* 2) Set up flow control on the MAC to that established with
* the link partner.
*/
if
(
hw
->
mac_type
>=
e1000_82544
)
{
e1000_config_collision_dist
(
hw
);
}
else
{
...
...
@@ -1264,8 +1380,8 @@ e1000_setup_copper_link(struct e1000_hw *hw)
DEBUGOUT
(
"Error Configuring Flow Control
\n
"
);
return
ret_val
;
}
DEBUGOUT
(
"Valid link established!!!
\n
"
);
/* Config DSP to improve Giga link quality */
if
(
hw
->
phy_type
==
e1000_phy_igp
)
{
ret_val
=
e1000_config_dsp_after_link_change
(
hw
,
TRUE
);
if
(
ret_val
)
{
...
...
@@ -1273,6 +1389,74 @@ e1000_setup_copper_link(struct e1000_hw *hw)
return
ret_val
;
}
}
return
E1000_SUCCESS
;
}
/******************************************************************************
* Detects which PHY is present and setup the speed and duplex
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
static
int32_t
e1000_setup_copper_link
(
struct
e1000_hw
*
hw
)
{
int32_t
ret_val
;
uint16_t
i
;
uint16_t
phy_data
;
DEBUGFUNC
(
"e1000_setup_copper_link"
);
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val
=
e1000_copper_link_preconfig
(
hw
);
if
(
ret_val
)
return
ret_val
;
if
(
hw
->
phy_type
==
e1000_phy_igp
||
hw
->
phy_type
==
e1000_phy_igp_2
)
{
ret_val
=
e1000_copper_link_igp_setup
(
hw
);
if
(
ret_val
)
return
ret_val
;
}
else
if
(
hw
->
phy_type
==
e1000_phy_m88
)
{
ret_val
=
e1000_copper_link_mgp_setup
(
hw
);
if
(
ret_val
)
return
ret_val
;
}
if
(
hw
->
autoneg
)
{
/* Setup autoneg and flow control advertisement
* and perform autonegotiation */
ret_val
=
e1000_copper_link_autoneg
(
hw
);
if
(
ret_val
)
return
ret_val
;
}
else
{
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
DEBUGOUT
(
"Forcing speed and duplex
\n
"
);
ret_val
=
e1000_phy_force_speed_duplex
(
hw
);
if
(
ret_val
)
{
DEBUGOUT
(
"Error Forcing Speed and Duplex
\n
"
);
return
ret_val
;
}
}
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
for
(
i
=
0
;
i
<
10
;
i
++
)
{
ret_val
=
e1000_read_phy_reg
(
hw
,
PHY_STATUS
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
ret_val
=
e1000_read_phy_reg
(
hw
,
PHY_STATUS
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
if
(
phy_data
&
MII_SR_LINK_STATUS
)
{
/* Config the MAC and PHY after link is up */
ret_val
=
e1000_copper_link_postconfig
(
hw
);
if
(
ret_val
)
return
ret_val
;
DEBUGOUT
(
"Valid link established!!!
\n
"
);
return
E1000_SUCCESS
;
}
...
...
@@ -1678,6 +1862,11 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
DEBUGFUNC
(
"e1000_config_mac_to_phy"
);
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
if
(
hw
->
mac_type
>=
e1000_82544
)
return
E1000_SUCCESS
;
/* Read the Device Control Register and set the bits to Force Speed
* and Duplex.
*/
...
...
@@ -1688,34 +1877,14 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
/* Set up duplex in the Device Control and Transmit Control
* registers depending on negotiated values.
*/
if
(
hw
->
phy_type
==
e1000_phy_igp
)
{
ret_val
=
e1000_read_phy_reg
(
hw
,
IGP01E1000_PHY_PORT_STATUS
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
if
(
phy_data
&
IGP01E1000_PSSR_FULL_DUPLEX
)
ctrl
|=
E1000_CTRL_FD
;
else
ctrl
&=
~
E1000_CTRL_FD
;
e1000_config_collision_dist
(
hw
);
/* Set up speed in the Device Control register depending on
* negotiated values.
*/
if
((
phy_data
&
IGP01E1000_PSSR_SPEED_MASK
)
==
IGP01E1000_PSSR_SPEED_1000MBPS
)
ctrl
|=
E1000_CTRL_SPD_1000
;
else
if
((
phy_data
&
IGP01E1000_PSSR_SPEED_MASK
)
==
IGP01E1000_PSSR_SPEED_100MBPS
)
ctrl
|=
E1000_CTRL_SPD_100
;
}
else
{
ret_val
=
e1000_read_phy_reg
(
hw
,
M88E1000_PHY_SPEC_STATUS
,
&
phy_data
);
ret_val
=
e1000_read_phy_reg
(
hw
,
M88E1000_PHY_SPEC_STATUS
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
if
(
phy_data
&
M88E1000_PSSR_DPLX
)
ctrl
|=
E1000_CTRL_FD
;
else
ctrl
&=
~
E1000_CTRL_FD
;
if
(
phy_data
&
M88E1000_PSSR_DPLX
)
ctrl
|=
E1000_CTRL_FD
;
else
ctrl
&=
~
E1000_CTRL_FD
;
e1000_config_collision_dist
(
hw
);
...
...
@@ -1726,7 +1895,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
ctrl
|=
E1000_CTRL_SPD_1000
;
else
if
((
phy_data
&
M88E1000_PSSR_SPEED
)
==
M88E1000_PSSR_100MBS
)
ctrl
|=
E1000_CTRL_SPD_100
;
}
/* Write the configured values back to the Device Control Reg. */
E1000_WRITE_REG
(
hw
,
CTRL
,
ctrl
);
return
E1000_SUCCESS
;
...
...
@@ -2494,8 +2663,8 @@ e1000_read_phy_reg(struct e1000_hw *hw,
DEBUGFUNC
(
"e1000_read_phy_reg"
);
if
(
hw
->
phy_type
==
e1000_phy_igp
&&
if
((
hw
->
phy_type
==
e1000_phy_igp
||
hw
->
phy_type
==
e1000_phy_igp_2
)
&&
(
reg_addr
>
MAX_PHY_MULTI_PAGE_REG
))
{
ret_val
=
e1000_write_phy_reg_ex
(
hw
,
IGP01E1000_PHY_PAGE_SELECT
,
(
uint16_t
)
reg_addr
);
...
...
@@ -2600,8 +2769,8 @@ e1000_write_phy_reg(struct e1000_hw *hw,
DEBUGFUNC
(
"e1000_write_phy_reg"
);
if
(
hw
->
phy_type
==
e1000_phy_igp
&&
if
((
hw
->
phy_type
==
e1000_phy_igp
||
hw
->
phy_type
==
e1000_phy_igp_2
)
&&
(
reg_addr
>
MAX_PHY_MULTI_PAGE_REG
))
{
ret_val
=
e1000_write_phy_reg_ex
(
hw
,
IGP01E1000_PHY_PAGE_SELECT
,
(
uint16_t
)
reg_addr
);
...
...
@@ -2679,19 +2848,27 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
return
E1000_SUCCESS
;
}
/******************************************************************************
* Returns the PHY to the power-on reset state
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
void
int32_t
e1000_phy_hw_reset
(
struct
e1000_hw
*
hw
)
{
uint32_t
ctrl
,
ctrl_ext
;
uint32_t
led_ctrl
;
int32_t
ret_val
;
DEBUGFUNC
(
"e1000_phy_hw_reset"
);
/* In the case of the phy reset being blocked, it's not an error, we
* simply return success without performing the reset. */
ret_val
=
e1000_check_phy_reset_block
(
hw
);
if
(
ret_val
)
return
E1000_SUCCESS
;
DEBUGOUT
(
"Resetting Phy...
\n
"
);
if
(
hw
->
mac_type
>
e1000_82543
)
{
...
...
@@ -2727,6 +2904,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
led_ctrl
|=
(
IGP_ACTIVITY_LED_ENABLE
|
IGP_LED3_MODE
);
E1000_WRITE_REG
(
hw
,
LEDCTL
,
led_ctrl
);
}
/* Wait for FW to finish PHY configuration. */
ret_val
=
e1000_get_phy_cfg_done
(
hw
);
return
ret_val
;
}
/******************************************************************************
...
...
@@ -2744,7 +2926,19 @@ e1000_phy_reset(struct e1000_hw *hw)
DEBUGFUNC
(
"e1000_phy_reset"
);
if
(
hw
->
mac_type
!=
e1000_82541_rev_2
)
{
/* In the case of the phy reset being blocked, it's not an error, we
* simply return success without performing the reset. */
ret_val
=
e1000_check_phy_reset_block
(
hw
);
if
(
ret_val
)
return
E1000_SUCCESS
;
switch
(
hw
->
mac_type
)
{
case
e1000_82541_rev_2
:
ret_val
=
e1000_phy_hw_reset
(
hw
);
if
(
ret_val
)
return
ret_val
;
break
;
default:
ret_val
=
e1000_read_phy_reg
(
hw
,
PHY_CTRL
,
&
phy_data
);
if
(
ret_val
)
return
ret_val
;
...
...
@@ -2755,9 +2949,10 @@ e1000_phy_reset(struct e1000_hw *hw)
            return ret_val;
        udelay(1);
    } else
        e1000_phy_hw_reset(hw);
        break;
    }
-   if (hw->phy_type == e1000_phy_igp)
+   if (hw->phy_type == e1000_phy_igp ||
+       hw->phy_type == e1000_phy_igp_2)
        e1000_phy_init_script(hw);

    return E1000_SUCCESS;
...
@@ -2811,6 +3006,9 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
    case e1000_82547_rev_2:
        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
        break;
    case e1000_82573:
        if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
        break;
    default:
        DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
        return -E1000_ERR_CONFIG;
...
@@ -2866,7 +3064,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
    /* The downshift status is checked only once, after link is established,
     * and it stored in the hw->speed_downgraded parameter. */
-   phy_info->downshift = hw->speed_downgraded;
+   phy_info->downshift = (e1000_downshift)hw->speed_downgraded;

    /* IGP01E1000 does not need to support it. */
    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
...
@@ -2905,7 +3103,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
    if (ret_val)
        return ret_val;

-   /* transalte to old method */
+   /* Translate to old method */
    average = (max_length + min_length) / 2;

    if (average <= e1000_igp_cable_length_50)
...
@@ -2940,7 +3138,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
    /* The downshift status is checked only once, after link is established,
     * and it stored in the hw->speed_downgraded parameter. */
-   phy_info->downshift = hw->speed_downgraded;
+   phy_info->downshift = (e1000_downshift)hw->speed_downgraded;

    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
    if (ret_val)
...
@@ -3029,7 +3227,8 @@ e1000_phy_get_info(struct e1000_hw *hw,
        return -E1000_ERR_CONFIG;
    }

-   if (hw->phy_type == e1000_phy_igp)
+   if (hw->phy_type == e1000_phy_igp ||
+       hw->phy_type == e1000_phy_igp_2)
        return e1000_phy_igp_get_info(hw, phy_info);
    else
        return e1000_phy_m88_get_info(hw, phy_info);
...
@@ -3055,11 +3254,12 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
-void
+int32_t
e1000_init_eeprom_params(struct e1000_hw *hw)
{
    struct e1000_eeprom_info *eeprom = &hw->eeprom;
    uint32_t eecd = E1000_READ_REG(hw, EECD);
    int32_t ret_val = E1000_SUCCESS;
    uint16_t eeprom_size;

    DEBUGFUNC("e1000_init_eeprom_params");
...
@@ -3074,6 +3274,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
        eeprom->opcode_bits = 3;
        eeprom->address_bits = 6;
        eeprom->delay_usec = 50;
        eeprom->use_eerd = FALSE;
        eeprom->use_eewr = FALSE;
        break;
    case e1000_82540:
    case e1000_82545:
...
@@ -3090,6 +3292,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
            eeprom->word_size = 64;
            eeprom->address_bits = 6;
        }
        eeprom->use_eerd = FALSE;
        eeprom->use_eewr = FALSE;
        break;
    case e1000_82541:
    case e1000_82541_rev_2:
...
@@ -3118,42 +3322,60 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
            eeprom->address_bits = 6;
        }
        }
        eeprom->use_eerd = FALSE;
        eeprom->use_eewr = FALSE;
        break;
+   case e1000_82573:
+       eeprom->type = e1000_eeprom_spi;
+       eeprom->opcode_bits = 8;
+       eeprom->delay_usec = 1;
+       if (eecd & E1000_EECD_ADDR_BITS) {
+           eeprom->page_size = 32;
+           eeprom->address_bits = 16;
+       } else {
+           eeprom->page_size = 8;
+           eeprom->address_bits = 8;
+       }
+       eeprom->use_eerd = TRUE;
+       eeprom->use_eewr = TRUE;
+       if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+           eeprom->type = e1000_eeprom_flash;
+           eeprom->word_size = 2048;
+           /* Ensure that the Autonomous FLASH update bit is cleared due to
+            * Flash update issue on parts which use a FLASH for NVM. */
+           eecd &= ~E1000_EECD_AUPDEN;
+           E1000_WRITE_REG(hw, EECD, eecd);
+       }
+       break;
    default:
        break;
    }

-   if (eeprom->type == e1000_eeprom_spi) {
-       eeprom->word_size = 64;
-       if (e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size) == 0) {
-           eeprom_size &= EEPROM_SIZE_MASK;
-           switch (eeprom_size) {
-           case EEPROM_SIZE_16KB:
-               eeprom->word_size = 8192;
-               break;
-           case EEPROM_SIZE_8KB:
-               eeprom->word_size = 4096;
-               break;
-           case EEPROM_SIZE_4KB:
-               eeprom->word_size = 2048;
-               break;
-           case EEPROM_SIZE_2KB:
-               eeprom->word_size = 1024;
-               break;
-           case EEPROM_SIZE_1KB:
-               eeprom->word_size = 512;
-               break;
-           case EEPROM_SIZE_512B:
-               eeprom->word_size = 256;
-               break;
-           case EEPROM_SIZE_128B:
-           default:
-               eeprom->word_size = 64;
-               break;
-           }
-       }
-   }
+   if (eeprom->type == e1000_eeprom_spi) {
+       /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+        * 32KB (incremented by powers of 2).
+        */
+       if (hw->mac_type <= e1000_82547_rev_2) {
+           /* Set to default value for initial eeprom read. */
+           eeprom->word_size = 64;
+           ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+           if (ret_val)
+               return ret_val;
+           eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+           /* 256B eeprom size was not supported in earlier hardware, so we
+            * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+            * is never the result used in the shifting logic below. */
+           if (eeprom_size)
+               eeprom_size++;
+       } else {
+           eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                         E1000_EECD_SIZE_EX_SHIFT);
+       }
+
+       eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+   }
+   return ret_val;
 }
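The size decoding above is a plain power-of-two mapping: the code read from EEPROM_CFG (or from the EECD size-extension bits on the newer MACs) is shifted into a 16-bit-word count. A minimal user-space sketch of that mapping follows; EEPROM_WORD_SIZE_SHIFT is assumed to be 6 here so that code 0 lands on 128 bytes and code 8 on 32 KB, matching the comment in the hunk — the constant's real value lives in e1000_hw.h, not in this excerpt.

/* Sketch of the eeprom_size -> word_size mapping described above.
 * EEPROM_WORD_SIZE_SHIFT is assumed to be 6 for illustration. */
#include <stdio.h>

#define EEPROM_WORD_SIZE_SHIFT 6   /* assumption, see note above */

int main(void)
{
    unsigned int code;

    for (code = 0; code <= 8; code++) {
        unsigned int words = 1u << (code + EEPROM_WORD_SIZE_SHIFT);
        /* each EEPROM word is 16 bits wide */
        printf("size code %u -> %5u words (%6u bytes)\n",
               code, words, words * 2);
    }
    return 0;
}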
/******************************************************************************
...
...
@@ -3306,8 +3528,12 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
    DEBUGFUNC("e1000_acquire_eeprom");

    if (e1000_get_hw_eeprom_semaphore(hw))
        return -E1000_ERR_EEPROM;

    eecd = E1000_READ_REG(hw, EECD);

    if (hw->mac_type != e1000_82573) {
    /* Request EEPROM Access */
    if (hw->mac_type > e1000_82544) {
        eecd |= E1000_EECD_REQ;
...
@@ -3326,6 +3552,7 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
            return -E1000_ERR_EEPROM;
        }
    }
    }

    /* Setup EEPROM for Read/Write */
...
@@ -3443,6 +3670,8 @@ e1000_release_eeprom(struct e1000_hw *hw)
        eecd &= ~E1000_EECD_REQ;
        E1000_WRITE_REG(hw, EECD, eecd);
    }

    e1000_put_hw_eeprom_semaphore(hw);
}

/******************************************************************************
...
@@ -3504,8 +3733,10 @@ e1000_read_eeprom(struct e1000_hw *hw,
{
    struct e1000_eeprom_info *eeprom = &hw->eeprom;
    uint32_t i = 0;
    int32_t ret_val;

    DEBUGFUNC("e1000_read_eeprom");

    /* A check for invalid values:  offset too large, too many words, and not
     * enough words.
     */
...
@@ -3515,9 +3746,23 @@ e1000_read_eeprom(struct e1000_hw *hw,
        return -E1000_ERR_EEPROM;
    }

    /* FLASH reads without acquiring the semaphore are safe in 82573-based
     * controllers.
     */
    if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
        (hw->mac_type != e1000_82573)) {
        /* Prepare the EEPROM for reading  */
        if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
            return -E1000_ERR_EEPROM;
    }

    if (eeprom->use_eerd == TRUE) {
        ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
        if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
            (hw->mac_type != e1000_82573))
            e1000_release_eeprom(hw);
        return ret_val;
    }

    if (eeprom->type == e1000_eeprom_spi) {
        uint16_t word_in;
...
@@ -3568,6 +3813,132 @@ e1000_read_eeprom(struct e1000_hw *hw,
    return E1000_SUCCESS;
}

/******************************************************************************
 * Reads a 16 bit word from the EEPROM using the EERD register.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *****************************************************************************/
int32_t
e1000_read_eeprom_eerd(struct e1000_hw *hw,
                       uint16_t offset,
                       uint16_t words,
                       uint16_t *data)
{
    uint32_t i, eerd = 0;
    int32_t error = 0;

    for (i = 0; i < words; i++) {
        eerd = ((offset + i) << E1000_EEPROM_RW_ADDR_SHIFT) +
               E1000_EEPROM_RW_REG_START;

        E1000_WRITE_REG(hw, EERD, eerd);
        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);

        if (error) {
            break;
        }
        data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
    }

    return error;
}

/******************************************************************************
 * Writes a 16 bit word from the EEPROM using the EEWR register.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *****************************************************************************/
int32_t
e1000_write_eeprom_eewr(struct e1000_hw *hw,
                        uint16_t offset,
                        uint16_t words,
                        uint16_t *data)
{
    uint32_t register_value = 0;
    uint32_t i = 0;
    int32_t error = 0;

    for (i = 0; i < words; i++) {
        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
                         ((offset + i) << E1000_EEPROM_RW_ADDR_SHIFT) |
                         E1000_EEPROM_RW_REG_START;

        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
        if (error) {
            break;
        }

        E1000_WRITE_REG(hw, EEWR, register_value);

        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);

        if (error) {
            break;
        }
    }

    return error;
}

/******************************************************************************
 * Polls the status bit (bit 1) of the EERD to determine when the read is done.
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
int32_t
e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
{
    uint32_t attempts = 100000;
    uint32_t i, reg = 0;
    int32_t done = E1000_ERR_EEPROM;

    for (i = 0; i < attempts; i++) {
        if (eerd == E1000_EEPROM_POLL_READ)
            reg = E1000_READ_REG(hw, EERD);
        else
            reg = E1000_READ_REG(hw, EEWR);

        if (reg & E1000_EEPROM_RW_REG_DONE) {
            done = E1000_SUCCESS;
            break;
        }
        udelay(5);
    }

    return done;
}

/***************************************************************************
 * Description: Determines if the onboard NVM is FLASH or EEPROM.
 *
 * hw - Struct containing variables accessed by shared code
 ****************************************************************************/
boolean_t
e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
{
    uint32_t eecd = 0;

    if (hw->mac_type == e1000_82573) {
        eecd = E1000_READ_REG(hw, EECD);

        /* Isolate bits 15 & 16 */
        eecd = ((eecd >> 15) & 0x03);

        /* If both bits are set, device is Flash type */
        if (eecd == 0x03) {
            return FALSE;
        }
    }
    return TRUE;
}
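e1000_is_onboard_nvm_eeprom() above decides between FLASH and EEPROM by isolating EECD bits 15 and 16 and treating the value 0x03 (both set) as FLASH. A small stand-alone sketch of just that bit test follows; the sample register values are made up for illustration.

/* Stand-alone illustration of the EECD bits 15/16 test used above.
 * Sample register values are invented for the demo. */
#include <stdio.h>
#include <stdint.h>

static int nvm_is_eeprom(uint32_t eecd)
{
    uint32_t type_bits = (eecd >> 15) & 0x03;  /* isolate bits 15 & 16 */

    return type_bits != 0x03;  /* 0x03 (both set) means FLASH */
}

int main(void)
{
    uint32_t samples[] = { 0x00000000, 0x00008000, 0x00018000 };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("EECD=0x%08x -> %s\n", (unsigned)samples[i],
               nvm_is_eeprom(samples[i]) ? "EEPROM" : "FLASH");
    return 0;
}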
/******************************************************************************
* Verifies that the EEPROM has a valid checksum
*
...
...
@@ -3585,6 +3956,25 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
    DEBUGFUNC("e1000_validate_eeprom_checksum");

    if ((hw->mac_type == e1000_82573) &&
        (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
        /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
         * 10h-12h.  Checksum may need to be fixed. */
        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
        if ((eeprom_data & 0x10) == 0) {
            /* Read 0x23 and check bit 15.  This bit is a 1 when the checksum
             * has already been fixed.  If the checksum is still wrong and this
             * bit is a 1, we need to return bad checksum.  Otherwise, we need
             * to set this bit to a 1 and update the checksum. */
            e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
            if ((eeprom_data & 0x8000) == 0) {
                eeprom_data |= 0x8000;
                e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
                e1000_update_eeprom_checksum(hw);
            }
        }
    }

    for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
            DEBUGOUT("EEPROM Read Error\n");
...
@@ -3628,6 +4018,8 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
        DEBUGOUT("EEPROM Write Error\n");
        return -E1000_ERR_EEPROM;
    } else if (hw->eeprom.type == e1000_eeprom_flash) {
        e1000_commit_shadow_ram(hw);
    }
    return E1000_SUCCESS;
}
...
@@ -3663,6 +4055,10 @@ e1000_write_eeprom(struct e1000_hw *hw,
        return -E1000_ERR_EEPROM;
    }

    /* 82573 reads only through eerd */
    if (eeprom->use_eewr == TRUE)
        return e1000_write_eeprom_eewr(hw, offset, words, data);

    /* Prepare the EEPROM for writing  */
    if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
        return -E1000_ERR_EEPROM;
...
@@ -3832,6 +4228,65 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
    return E1000_SUCCESS;
}

/******************************************************************************
 * Flushes the cached eeprom to NVM. This is done by saving the modified values
 * in the eeprom cache and the non modified values in the currently active bank
 * to the new bank.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *****************************************************************************/
int32_t
e1000_commit_shadow_ram(struct e1000_hw *hw)
{
    uint32_t attempts = 100000;
    uint32_t eecd = 0;
    uint32_t flop = 0;
    uint32_t i = 0;
    int32_t error = E1000_SUCCESS;

    /* The flop register will be used to determine if flash type is STM */
    flop = E1000_READ_REG(hw, FLOP);

    if (hw->mac_type == e1000_82573) {
        for (i = 0; i < attempts; i++) {
            eecd = E1000_READ_REG(hw, EECD);
            if ((eecd & E1000_EECD_FLUPD) == 0) {
                break;
            }
            udelay(5);
        }

        if (i == attempts) {
            return -E1000_ERR_EEPROM;
        }

        /* If STM opcode located in bits 15:8 of flop, reset firmware */
        if ((flop & 0xFF00) == E1000_STM_OPCODE) {
            E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
        }

        /* Perform the flash update */
        E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);

        for (i = 0; i < attempts; i++) {
            eecd = E1000_READ_REG(hw, EECD);
            if ((eecd & E1000_EECD_FLUPD) == 0) {
                break;
            }
            udelay(5);
        }

        if (i == attempts) {
            return -E1000_ERR_EEPROM;
        }
    }

    return error;
}

/******************************************************************************
 * Reads the adapter's part number from the EEPROM
 *
...
...
@@ -3911,6 +4366,7 @@ void
e1000_init_rx_addrs(struct e1000_hw *hw)
{
    uint32_t i;
    uint32_t rar_num;

    DEBUGFUNC("e1000_init_rx_addrs");
...
@@ -3919,9 +4375,10 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
    e1000_rar_set(hw, hw->mac_addr, 0);

    rar_num = E1000_RAR_ENTRIES;
    /* Zero out the other 15 receive addresses. */
    DEBUGOUT("Clearing RAR[1-15]\n");
-   for (i = 1; i < E1000_RAR_ENTRIES; i++) {
+   for (i = 1; i < rar_num; i++) {
        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
    }
...
@@ -3950,6 +4407,8 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
{
    uint32_t hash_value;
    uint32_t i;
    uint32_t num_rar_entry;
    uint32_t num_mta_entry;

    DEBUGFUNC("e1000_mc_addr_list_update");
...
@@ -3958,14 +4417,16 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
    /* Clear RAR[1-15] */
    DEBUGOUT(" Clearing RAR[1-15]\n");
-   for (i = rar_used_count; i < E1000_RAR_ENTRIES; i++) {
+   num_rar_entry = E1000_RAR_ENTRIES;
+   for (i = rar_used_count; i < num_rar_entry; i++) {
        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
    }

    /* Clear the MTA */
    DEBUGOUT(" Clearing MTA\n");
-   for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) {
+   num_mta_entry = E1000_NUM_MTA_REGISTERS;
+   for (i = 0; i < num_mta_entry; i++) {
        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
    }
...
@@ -3989,7 +4450,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
        /* Place this multicast address in the RAR if there is room, *
         * else put it in the MTA
         */
-       if (rar_used_count < E1000_RAR_ENTRIES) {
+       if (rar_used_count < num_rar_entry) {
            e1000_rar_set(hw,
                          mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)),
                          rar_used_count);
...
@@ -4040,6 +4501,7 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
    }

    hash_value &= 0xFFF;

    return hash_value;
}
...
@@ -4144,12 +4606,33 @@ void
e1000_clear_vfta(struct e1000_hw *hw)
{
    uint32_t offset;
-   for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++)
-       E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+   uint32_t vfta_value = 0;
+   uint32_t vfta_offset = 0;
+   uint32_t vfta_bit_in_reg = 0;
+
+   if (hw->mac_type == e1000_82573) {
+       if (hw->mng_cookie.vlan_id != 0) {
+           /* The VFTA is a 4096b bit-field, each identifying a single VLAN
+            * ID.  The following operations determine which 32b entry
+            * (i.e. offset) into the array we want to set the VLAN ID
+            * (i.e. bit) of the manageability unit. */
+           vfta_offset = (hw->mng_cookie.vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+                         E1000_VFTA_ENTRY_MASK;
+           vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+                                   E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+       }
+   }
+   for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+       /* If the offset we want to clear is the same offset of the
+        * manageability VLAN ID, then clear all bits except that of the
+        * manageability unit */
+       vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+       E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+   }
}

-static int32_t
+int32_t
e1000_id_led_init(struct e1000_hw *hw)
{
    uint32_t ledctl;
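The VFTA bookkeeping above boils down to two shifts: the upper bits of the VLAN ID select one of the 32-bit table entries, and the low 5 bits select the bit inside that entry. A user-space sketch of that arithmetic, using the E1000_VFTA_ENTRY_* constants that this same commit adds to e1000_hw.h, is shown below with an arbitrary example VLAN ID.

/* Sketch of the VLAN-ID -> (VFTA dword, bit) mapping used in
 * e1000_clear_vfta() above.  The constants mirror e1000_hw.h. */
#include <stdio.h>
#include <stdint.h>

#define E1000_VFTA_ENTRY_SHIFT          0x5
#define E1000_VFTA_ENTRY_MASK           0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F

int main(void)
{
    uint16_t vlan_id = 100;  /* arbitrary example */
    uint32_t vfta_offset = (vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
                           E1000_VFTA_ENTRY_MASK;
    uint32_t vfta_bit_in_reg = 1u << (vlan_id &
                                      E1000_VFTA_ENTRY_BIT_SHIFT_MASK);

    printf("VLAN %u -> VFTA[%u], bit mask 0x%08x\n",
           vlan_id, (unsigned)vfta_offset, (unsigned)vfta_bit_in_reg);
    return 0;
}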
...
...
@@ -4480,6 +4963,19 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
    temp = E1000_READ_REG(hw, MGTPRC);
    temp = E1000_READ_REG(hw, MGTPDC);
    temp = E1000_READ_REG(hw, MGTPTC);

    if (hw->mac_type <= e1000_82547_rev_2)
        return;

    temp = E1000_READ_REG(hw, IAC);
    temp = E1000_READ_REG(hw, ICRXOC);
    temp = E1000_READ_REG(hw, ICRXPTC);
    temp = E1000_READ_REG(hw, ICRXATC);
    temp = E1000_READ_REG(hw, ICTXPTC);
    temp = E1000_READ_REG(hw, ICTXATC);
    temp = E1000_READ_REG(hw, ICTXQEC);
    temp = E1000_READ_REG(hw, ICTXQMTC);
    temp = E1000_READ_REG(hw, ICRXDMTC);
}

/******************************************************************************
...
@@ -4646,6 +5142,11 @@ e1000_get_bus_info(struct e1000_hw *hw)
        hw->bus_speed = e1000_bus_speed_unknown;
        hw->bus_width = e1000_bus_width_unknown;
        break;
    case e1000_82573:
        hw->bus_type = e1000_bus_type_pci_express;
        hw->bus_speed = e1000_bus_speed_2500;
        hw->bus_width = e1000_bus_width_pciex_4;
        break;
    default:
        status = E1000_READ_REG(hw, STATUS);
        hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
...
@@ -4749,6 +5250,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
    /* Use old method for Phy older than IGP */
    if (hw->phy_type == e1000_phy_m88) {

        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
        if (ret_val)
...
@@ -4865,7 +5367,8 @@ e1000_check_polarity(struct e1000_hw *hw,
            return ret_val;
        *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
                    M88E1000_PSSR_REV_POLARITY_SHIFT;
-   } else if (hw->phy_type == e1000_phy_igp) {
+   } else if (hw->phy_type == e1000_phy_igp ||
+              hw->phy_type == e1000_phy_igp_2) {
        /* Read the Status register to check the speed */
        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
...
@@ -4917,7 +5420,8 @@ e1000_check_downshift(struct e1000_hw *hw)
    DEBUGFUNC("e1000_check_downshift");

-   if (hw->phy_type == e1000_phy_igp) {
+   if (hw->phy_type == e1000_phy_igp ||
+       hw->phy_type == e1000_phy_igp_2) {
        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, &phy_data);
        if (ret_val)
...
@@ -4933,6 +5437,7 @@ e1000_check_downshift(struct e1000_hw *hw)
        hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
                               M88E1000_PSSR_DOWNSHIFT_SHIFT;
    }

    return E1000_SUCCESS;
}
...
...
@@ -5047,7 +5552,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
            if (ret_val)
                return ret_val;
-           msec_delay(20);
+           msec_delay_irq(20);

            ret_val = e1000_write_phy_reg(hw, 0x0000,
                                          IGP01E1000_IEEE_FORCE_GIGA);
...
@@ -5071,7 +5576,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
            if (ret_val)
                return ret_val;
-           msec_delay(20);
+           msec_delay_irq(20);

            /* Now enable the transmitter */
            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
...
@@ -5096,7 +5601,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
        if (ret_val)
            return ret_val;
-       msec_delay(20);
+       msec_delay_irq(20);

        ret_val = e1000_write_phy_reg(hw, 0x0000,
                                      IGP01E1000_IEEE_FORCE_GIGA);
...
@@ -5112,7 +5617,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
        if (ret_val)
            return ret_val;
-       msec_delay(20);
+       msec_delay_irq(20);

        /* Now enable the transmitter */
        ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
...
...
@@ -5187,22 +5692,36 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
    uint16_t phy_data;
    DEBUGFUNC("e1000_set_d3_lplu_state");

-   if (!((hw->mac_type == e1000_82541_rev_2) ||
-         (hw->mac_type == e1000_82547_rev_2)))
+   if (hw->phy_type != e1000_phy_igp &&
+       hw->phy_type != e1000_phy_igp_2)
        return E1000_SUCCESS;

    /* During driver activity LPLU should not be used or it will attain link
     * from the lowest speeds starting from 10Mbps. The capability is used for
     * Dx transitions and states */
    if (hw->mac_type == e1000_82541_rev_2 ||
        hw->mac_type == e1000_82547_rev_2) {
        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
        if (ret_val)
            return ret_val;
    } else {
        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
        if (ret_val)
            return ret_val;
    }

    if (!active) {
        if (hw->mac_type == e1000_82541_rev_2 ||
            hw->mac_type == e1000_82547_rev_2) {
            phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
            if (ret_val)
                return ret_val;
        } else {
            phy_data &= ~IGP02E1000_PM_D3_LPLU;
            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                          phy_data);
            if (ret_val)
                return ret_val;
        }

        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
         * Dx states where the power conservation is most important.  During
...
@@ -5236,10 +5755,19 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
              (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) ||
              (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {

        if (hw->mac_type == e1000_82541_rev_2 ||
            hw->mac_type == e1000_82547_rev_2) {
            phy_data |= IGP01E1000_GMII_FLEX_SPD;
            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
            if (ret_val)
                return ret_val;
        } else {
            phy_data |= IGP02E1000_PM_D3_LPLU;
            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
                                          phy_data);
            if (ret_val)
                return ret_val;
        }

        /* When LPLU is enabled we should disable SmartSpeed */
        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
...
...
@@ -5255,54 +5783,139 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
    return E1000_SUCCESS;
}

/*****************************************************************************
 *
 * This function sets the lplu d0 state according to the active flag.  When
 * activating lplu this function also disables smart speed and vise versa.
 * lplu will not be activated unless the device autonegotiation advertisment
 * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
 * hw: Struct containing variables accessed by shared code
 * active - true to enable lplu false to disable lplu.
 *
 * returns: - E1000_ERR_PHY if fail to read/write the PHY
 *            E1000_SUCCESS at any other case.
 *
 ****************************************************************************/
int32_t
e1000_set_d0_lplu_state(struct e1000_hw *hw,
                        boolean_t active)
{
    int32_t ret_val;
    uint16_t phy_data;
    DEBUGFUNC("e1000_set_d0_lplu_state");

    if (hw->mac_type <= e1000_82547_rev_2)
        return E1000_SUCCESS;

    ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
    if (ret_val)
        return ret_val;

    if (!active) {
        phy_data &= ~IGP02E1000_PM_D0_LPLU;
        ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
        if (ret_val)
            return ret_val;

        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
         * Dx states where the power conservation is most important.  During
         * driver activity we should enable SmartSpeed, so performance is
         * maintained. */
        if (hw->smart_speed == e1000_smart_speed_on) {
            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                         &phy_data);
            if (ret_val)
                return ret_val;

            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                          phy_data);
            if (ret_val)
                return ret_val;
        } else if (hw->smart_speed == e1000_smart_speed_off) {
            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                         &phy_data);
            if (ret_val)
                return ret_val;

            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                          phy_data);
            if (ret_val)
                return ret_val;
        }
    } else {
        phy_data |= IGP02E1000_PM_D0_LPLU;
        ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
        if (ret_val)
            return ret_val;

        /* When LPLU is enabled we should disable SmartSpeed */
        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
        if (ret_val)
            return ret_val;

        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
        if (ret_val)
            return ret_val;
    }
    return E1000_SUCCESS;
}

/******************************************************************************
 * Change VCO speed register to improve Bit Error Rate performance of SERDES.
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
static int32_t
e1000_set_vco_speed(struct e1000_hw *hw)
{
    int32_t ret_val;
    uint16_t default_page = 0;
    uint16_t phy_data;

    DEBUGFUNC("e1000_set_vco_speed");

    switch (hw->mac_type) {
    case e1000_82545_rev_3:
    case e1000_82546_rev_3:
        break;
    default:
        return E1000_SUCCESS;
    }

    /* Set PHY register 30, page 5, bit 8 to 0 */
    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
    if (ret_val)
        return ret_val;

    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
    if (ret_val)
        return ret_val;

    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
    if (ret_val)
        return ret_val;

    phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
    if (ret_val)
        return ret_val;

    /* Set PHY register 30, page 4, bit 11 to 1 */
    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
    if (ret_val)
        return ret_val;

    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
    if (ret_val)
        return ret_val;
...
...
@@ -5318,6 +5931,338 @@ e1000_set_vco_speed(struct e1000_hw *hw)
    return E1000_SUCCESS;
}

/*****************************************************************************
 * This function reads the cookie from ARC ram.
 *
 * returns: - E1000_SUCCESS .
 ****************************************************************************/
int32_t
e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer)
{
    uint8_t i;
    uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
    uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;

    length = (length >> 2);
    offset = (offset >> 2);

    for (i = 0; i < length; i++) {
        *((uint32_t *)buffer + i) =
            E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
    }
    return E1000_SUCCESS;
}

/*****************************************************************************
 * This function checks whether the HOST IF is enabled for command operaton
 * and also checks whether the previous command is completed.
 * It busy waits in case of previous command is not completed.
 *
 * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
 *            timeout
 *          - E1000_SUCCESS for success.
 ****************************************************************************/
int32_t
e1000_mng_enable_host_if(struct e1000_hw *hw)
{
    uint32_t hicr;
    uint8_t i;

    /* Check that the host interface is enabled. */
    hicr = E1000_READ_REG(hw, HICR);
    if ((hicr & E1000_HICR_EN) == 0) {
        DEBUGOUT("E1000_HOST_EN bit disabled.\n");
        return -E1000_ERR_HOST_INTERFACE_COMMAND;
    }
    /* check the previous command is completed */
    for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
        hicr = E1000_READ_REG(hw, HICR);
        if (!(hicr & E1000_HICR_C))
            break;
        msec_delay_irq(1);
    }

    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
        DEBUGOUT("Previous command timeout failed .\n");
        return -E1000_ERR_HOST_INTERFACE_COMMAND;
    }
    return E1000_SUCCESS;
}

/*****************************************************************************
 * This function writes the buffer content at the offset given on the host if.
 * It also does alignment considerations to do the writes in most efficient way.
 * Also fills up the sum of the buffer in *buffer parameter.
 *
 * returns  - E1000_SUCCESS for success.
 ****************************************************************************/
int32_t
e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer,
                        uint16_t length, uint16_t offset, uint8_t *sum)
{
    uint8_t *tmp;
    uint8_t *bufptr = buffer;
    uint32_t data;
    uint16_t remaining, i, j, prev_bytes;

    /* sum = only sum of the data and it is not checksum */

    if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
        return -E1000_ERR_PARAM;
    }

    tmp = (uint8_t *)&data;
    prev_bytes = offset & 0x3;
    offset &= 0xFFFC;
    offset >>= 2;

    if (prev_bytes) {
        data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
        for (j = prev_bytes; j < sizeof(uint32_t); j++) {
            *(tmp + j) = *bufptr++;
            *sum += *(tmp + j);
        }
        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
        length -= j - prev_bytes;
        offset++;
    }

    remaining = length & 0x3;
    length -= remaining;

    /* Calculate length in DWORDs */
    length >>= 2;

    /* The device driver writes the relevant command block into the
     * ram area. */
    for (i = 0; i < length; i++) {
        for (j = 0; j < sizeof(uint32_t); j++) {
            *(tmp + j) = *bufptr++;
            *sum += *(tmp + j);
        }

        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
    }
    if (remaining) {
        for (j = 0; j < sizeof(uint32_t); j++) {
            if (j < remaining)
                *(tmp + j) = *bufptr++;
            else
                *(tmp + j) = 0;
            *sum += *(tmp + j);
        }
        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
    }
    return E1000_SUCCESS;
}
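The alignment handling in e1000_mng_host_if_write() splits an arbitrary (offset, length) byte range into a leading partial dword, a run of whole dwords and a trailing remainder, which is what the prev_bytes/remaining variables above track. The stand-alone sketch below reproduces only that split arithmetic (no register access); the example offset and length are chosen purely for illustration.

/* Reproduces the offset/length split arithmetic from
 * e1000_mng_host_if_write() above, without any hardware access. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t offset = 6, length = 11;   /* arbitrary example range */
    uint16_t prev_bytes, remaining, dwords;

    prev_bytes = offset & 0x3;          /* bytes already used in first dword */
    offset &= 0xFFFC;
    offset >>= 2;                       /* index of that first dword */

    if (prev_bytes)
        length -= (4 - prev_bytes);     /* head bytes merged into that dword */

    remaining = length & 0x3;           /* tail bytes */
    dwords = (length - remaining) >> 2; /* whole dwords in the middle */

    printf("head bytes %u (dword %u), middle dwords %u, tail bytes %u\n",
           prev_bytes ? 4 - prev_bytes : 0, offset, dwords, remaining);
    return 0;
}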
/*****************************************************************************
 * This function writes the command header after does the checksum calculation.
 *
 * returns  - E1000_SUCCESS for success.
 ****************************************************************************/
int32_t
e1000_mng_write_cmd_header(struct e1000_hw *hw,
                           struct e1000_host_mng_command_header *hdr)
{
    uint16_t i;
    uint8_t sum;
    uint8_t *buffer;

    /* Write the whole command header structure which includes sum of
     * the buffer */

    uint16_t length = sizeof(struct e1000_host_mng_command_header);

    sum = hdr->checksum;
    hdr->checksum = 0;

    buffer = (uint8_t *)hdr;
    i = length;
    while (i--)
        sum += buffer[i];

    hdr->checksum = 0 - sum;

    length >>= 2;
    /* The device driver writes the relevant command block into the ram area. */
    for (i = 0; i < length; i++)
        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *)hdr + i));

    return E1000_SUCCESS;
}

/*****************************************************************************
 * This function indicates to ARC that a new command is pending which completes
 * one write operation by the driver.
 *
 * returns  - E1000_SUCCESS for success.
 ****************************************************************************/
int32_t
e1000_mng_write_commit(struct e1000_hw *hw)
{
    uint32_t hicr;

    hicr = E1000_READ_REG(hw, HICR);
    /* Setting this bit tells the ARC that a new command is pending. */
    E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);

    return E1000_SUCCESS;
}

/*****************************************************************************
 * This function checks the mode of the firmware.
 *
 * returns  - TRUE when the mode is IAMT or FALSE.
 ****************************************************************************/
boolean_t
e1000_check_mng_mode(struct e1000_hw *hw)
{
    uint32_t fwsm;

    fwsm = E1000_READ_REG(hw, FWSM);

    if ((fwsm & E1000_FWSM_MODE_MASK) ==
        (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
        return TRUE;

    return FALSE;
}

/*****************************************************************************
 * This function writes the dhcp info .
 ****************************************************************************/
int32_t
e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
                          uint16_t length)
{
    int32_t ret_val;
    struct e1000_host_mng_command_header hdr;

    hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
    hdr.command_length = length;
    hdr.reserved1 = 0;
    hdr.reserved2 = 0;
    hdr.checksum = 0;

    ret_val = e1000_mng_enable_host_if(hw);
    if (ret_val == E1000_SUCCESS) {
        ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
                                          &(hdr.checksum));
        if (ret_val == E1000_SUCCESS) {
            ret_val = e1000_mng_write_cmd_header(hw, &hdr);
            if (ret_val == E1000_SUCCESS)
                ret_val = e1000_mng_write_commit(hw);
        }
    }
    return ret_val;
}

/*****************************************************************************
 * This function calculates the checksum.
 *
 * returns  - checksum of buffer contents.
 ****************************************************************************/
uint8_t
e1000_calculate_mng_checksum(char *buffer, uint32_t length)
{
    uint8_t sum = 0;
    uint32_t i;

    if (!buffer)
        return 0;

    for (i = 0; i < length; i++)
        sum += buffer[i];

    return (uint8_t)(0 - sum);
}
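e1000_calculate_mng_checksum() returns the two's-complement of the byte sum, so adding the checksum back to the sum of the covered bytes must give 0 modulo 256 — the property the tx-packet-filtering code below relies on when it validates the DHCP cookie. A tiny self-contained check of that property over an arbitrary buffer:

/* Verifies the checksum property used by the management cookie code:
 * sum(buffer) + checksum == 0 (mod 256).  The buffer content is arbitrary. */
#include <stdio.h>
#include <stdint.h>

static uint8_t calculate_mng_checksum(const char *buffer, uint32_t length)
{
    uint8_t sum = 0;
    uint32_t i;

    for (i = 0; i < length; i++)
        sum += (uint8_t)buffer[i];
    return (uint8_t)(0 - sum);
}

int main(void)
{
    char cookie[16] = { 0x49, 0x41, 0x4D, 0x54, 0x00, 0x64, 0x01, 0x02 };
    uint8_t csum = calculate_mng_checksum(cookie, sizeof(cookie));
    uint8_t total = csum;
    uint32_t i;

    for (i = 0; i < sizeof(cookie); i++)
        total += (uint8_t)cookie[i];

    printf("checksum = 0x%02x, sum + checksum (mod 256) = %u\n", csum, total);
    return 0;
}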
/*****************************************************************************
 * This function checks whether tx pkt filtering needs to be enabled or not.
 *
 * returns  - TRUE for packet filtering or FALSE.
 ****************************************************************************/
boolean_t
e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
    /* called in init as well as watchdog timer functions */

    int32_t ret_val, checksum;
    boolean_t tx_filter = FALSE;
    struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
    uint8_t *buffer = (uint8_t *)&(hw->mng_cookie);

    if (e1000_check_mng_mode(hw)) {
        ret_val = e1000_mng_enable_host_if(hw);
        if (ret_val == E1000_SUCCESS) {
            ret_val = e1000_host_if_read_cookie(hw, buffer);
            if (ret_val == E1000_SUCCESS) {
                checksum = hdr->checksum;
                hdr->checksum = 0;
                if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
                    checksum == e1000_calculate_mng_checksum((char *)buffer,
                                               E1000_MNG_DHCP_COOKIE_LENGTH)) {
                    if (hdr->status &
                        E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
                        tx_filter = TRUE;
                } else
                    tx_filter = TRUE;
            } else
                tx_filter = TRUE;
        }
    }

    hw->tx_pkt_filtering = tx_filter;
    return tx_filter;
}

/******************************************************************************
 * Verifies the hardware needs to allow ARPs to be processed by the host
 *
 * hw - Struct containing variables accessed by shared code
 *
 * returns: - TRUE/FALSE
 *
 *****************************************************************************/
uint32_t
e1000_enable_mng_pass_thru(struct e1000_hw *hw)
{
    uint32_t manc;
    uint32_t fwsm, factps;

    if (hw->asf_firmware_present) {
        manc = E1000_READ_REG(hw, MANC);

        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
            return FALSE;
        if (e1000_arc_subsystem_valid(hw) == TRUE) {
            fwsm = E1000_READ_REG(hw, FWSM);
            factps = E1000_READ_REG(hw, FACTPS);

            if (((fwsm & E1000_FWSM_MODE_MASK) ==
                (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) &&
                (factps & E1000_FACTPS_MNGCG))
                return TRUE;
        } else
            if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
                return TRUE;
    }
    return FALSE;
}

static int32_t
e1000_polarity_reversal_workaround(struct e1000_hw *hw)
{
...
...
@@ -5403,3 +6348,265 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
    return E1000_SUCCESS;
}

/***************************************************************************
 *
 * Disables PCI-Express master access.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - none.
 *
 ***************************************************************************/
void
e1000_set_pci_express_master_disable(struct e1000_hw *hw)
{
    uint32_t ctrl;

    DEBUGFUNC("e1000_set_pci_express_master_disable");

    if (hw->bus_type != e1000_bus_type_pci_express)
        return;

    ctrl = E1000_READ_REG(hw, CTRL);
    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
    E1000_WRITE_REG(hw, CTRL, ctrl);
}

/***************************************************************************
 *
 * Enables PCI-Express master access.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - none.
 *
 ***************************************************************************/
void
e1000_enable_pciex_master(struct e1000_hw *hw)
{
    uint32_t ctrl;

    DEBUGFUNC("e1000_enable_pciex_master");

    if (hw->bus_type != e1000_bus_type_pci_express)
        return;

    ctrl = E1000_READ_REG(hw, CTRL);
    ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
    E1000_WRITE_REG(hw, CTRL, ctrl);
}

/*******************************************************************************
 *
 * Disables PCI-Express master access and verifies there are no pending requests
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
 *            caused the master requests to be disabled.
 *            E1000_SUCCESS master requests disabled.
 *
 ******************************************************************************/
int32_t
e1000_disable_pciex_master(struct e1000_hw *hw)
{
    int32_t timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */

    DEBUGFUNC("e1000_disable_pciex_master");

    if (hw->bus_type != e1000_bus_type_pci_express)
        return E1000_SUCCESS;

    e1000_set_pci_express_master_disable(hw);

    while (timeout) {
        if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
            break;
        else
            udelay(100);
        timeout--;
    }

    if (!timeout) {
        DEBUGOUT("Master requests are pending.\n");
        return -E1000_ERR_MASTER_REQUESTS_PENDING;
    }

    return E1000_SUCCESS;
}

/*******************************************************************************
 *
 * Check for EEPROM Auto Read bit done.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_RESET if fail to reset MAC
 *            E1000_SUCCESS at any other case.
 *
 ******************************************************************************/
int32_t
e1000_get_auto_rd_done(struct e1000_hw *hw)
{
    int32_t timeout = AUTO_READ_DONE_TIMEOUT;

    DEBUGFUNC("e1000_get_auto_rd_done");

    switch (hw->mac_type) {
    default:
        msec_delay(5);
        break;
    case e1000_82573:
        while (timeout) {
            if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
                break;
            else
                msec_delay(1);
            timeout--;
        }

        if (!timeout) {
            DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
            return -E1000_ERR_RESET;
        }
        break;
    }

    return E1000_SUCCESS;
}

/***************************************************************************
 * Checks if the PHY configuration is done
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_RESET if fail to reset MAC
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
int32_t
e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
    DEBUGFUNC("e1000_get_phy_cfg_done");

    /* Simply wait for 10ms */
    msec_delay(10);

    return E1000_SUCCESS;
}

/***************************************************************************
 *
 * Using the combination of SMBI and SWESMBI semaphore bits when resetting
 * adapter or Eeprom access.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
 *            E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
int32_t
e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
{
    int32_t timeout;
    uint32_t swsm;

    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");

    if (!hw->eeprom_semaphore_present)
        return E1000_SUCCESS;

    /* Get the FW semaphore. */
    timeout = hw->eeprom.word_size + 1;
    while (timeout) {
        swsm = E1000_READ_REG(hw, SWSM);
        swsm |= E1000_SWSM_SWESMBI;
        E1000_WRITE_REG(hw, SWSM, swsm);
        /* if we managed to set the bit we got the semaphore. */
        swsm = E1000_READ_REG(hw, SWSM);
        if (swsm & E1000_SWSM_SWESMBI)
            break;

        udelay(50);
        timeout--;
    }

    if (!timeout) {
        /* Release semaphores */
        e1000_put_hw_eeprom_semaphore(hw);
        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
        return -E1000_ERR_EEPROM;
    }

    return E1000_SUCCESS;
}

/***************************************************************************
 * This function clears HW semaphore bits.
 *
 * hw: Struct containing variables accessed by shared code
 *
 * returns: - None.
 *
 ***************************************************************************/
void
e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
{
    uint32_t swsm;

    DEBUGFUNC("e1000_put_hw_eeprom_semaphore");

    if (!hw->eeprom_semaphore_present)
        return;

    swsm = E1000_READ_REG(hw, SWSM);
    /* Release both semaphores. */
    swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
    E1000_WRITE_REG(hw, SWSM, swsm);
}
/******************************************************************************
 * Checks if PHY reset is blocked due to SOL/IDER session, for example.
 * Returning E1000_BLK_PHY_RESET isn't necessarily an error.  But it's up to
 * the caller to figure out how to deal with it.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * returns: - E1000_BLK_PHY_RESET
 *            E1000_SUCCESS
 *
 *****************************************************************************/
int32_t
e1000_check_phy_reset_block(struct e1000_hw *hw)
{
    uint32_t manc = 0;
    if (hw->mac_type > e1000_82547_rev_2)
        manc = E1000_READ_REG(hw, MANC);
    return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
            E1000_BLK_PHY_RESET : E1000_SUCCESS;
}

uint8_t
e1000_arc_subsystem_valid(struct e1000_hw *hw)
{
    uint32_t fwsm;

    /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
     * may not be provided a DMA clock when no manageability features are
     * enabled.  We do not want to perform any reads/writes to these registers
     * if this is the case.  We read FWSM to determine the manageability mode.
     */
    switch (hw->mac_type) {
    case e1000_82573:
        fwsm = E1000_READ_REG(hw, FWSM);
        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
            return TRUE;
        break;
    default:
        break;
    }
    return FALSE;
}
drivers/net/e1000/e1000_hw.h
View file @
1b981021
/*******************************************************************************

-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
...
...
@@ -57,6 +57,7 @@ typedef enum {
    e1000_82541_rev_2,
    e1000_82547,
    e1000_82547_rev_2,
    e1000_82573,
    e1000_num_macs
} e1000_mac_type;
...
@@ -64,6 +65,7 @@ typedef enum {
    e1000_eeprom_uninitialized = 0,
    e1000_eeprom_spi,
    e1000_eeprom_microwire,
    e1000_eeprom_flash,
    e1000_num_eeprom_types
} e1000_eeprom_type;
...
@@ -96,6 +98,7 @@ typedef enum {
    e1000_bus_type_unknown = 0,
    e1000_bus_type_pci,
    e1000_bus_type_pcix,
    e1000_bus_type_pci_express,
    e1000_bus_type_reserved
} e1000_bus_type;
...
@@ -107,6 +110,7 @@ typedef enum {
    e1000_bus_speed_100,
    e1000_bus_speed_120,
    e1000_bus_speed_133,
    e1000_bus_speed_2500,
    e1000_bus_speed_reserved
} e1000_bus_speed;
...
@@ -115,6 +119,8 @@ typedef enum {
    e1000_bus_width_unknown = 0,
    e1000_bus_width_32,
    e1000_bus_width_64,
    e1000_bus_width_pciex_1,
    e1000_bus_width_pciex_4,
    e1000_bus_width_reserved
} e1000_bus_width;
...
@@ -196,6 +202,7 @@ typedef enum {
typedef enum {
    e1000_phy_m88 = 0,
    e1000_phy_igp,
    e1000_phy_igp_2,
    e1000_phy_undefined = 0xFF
} e1000_phy_type;
...
@@ -242,8 +249,19 @@ struct e1000_eeprom_info {
    uint16_t address_bits;
    uint16_t delay_usec;
    uint16_t page_size;
    boolean_t use_eerd;
    boolean_t use_eewr;
};

/* Flex ASF Information */
#define E1000_HOST_IF_MAX_SIZE  2048

typedef enum {
    e1000_byte_align = 0,
    e1000_word_align = 1,
    e1000_dword_align = 2
} e1000_align_type;

/* Error Codes */
...
...
@@ -254,11 +272,16 @@ struct e1000_eeprom_info {
#define E1000_ERR_PARAM    4
#define E1000_ERR_MAC_TYPE 5
#define E1000_ERR_PHY_TYPE 6
#define E1000_ERR_RESET   9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_ERR_HOST_INTERFACE_COMMAND 11
#define E1000_BLK_PHY_RESET   12

/* Function prototypes */
/* Initialization */
int32_t e1000_reset_hw(struct e1000_hw *hw);
int32_t e1000_init_hw(struct e1000_hw *hw);
int32_t e1000_id_led_init(struct e1000_hw *hw);
int32_t e1000_set_mac_type(struct e1000_hw *hw);
void e1000_set_media_type(struct e1000_hw *hw);
...
@@ -275,7 +298,7 @@ int32_t e1000_force_mac_fc(struct e1000_hw *hw);
/* PHY */
int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
-void e1000_phy_hw_reset(struct e1000_hw *hw);
+int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
...
...
@@ -287,13 +310,86 @@ int32_t e1000_check_downshift(struct e1000_hw *hw);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);

/* EEPROM Functions */
-void e1000_init_eeprom_params(struct e1000_hw *hw);
+int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);

/* MNG HOST IF functions */
uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);

#define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */

#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
#define E1000_MNG_IAMT_MODE             0x3
#define E1000_IAMT_SIGNATURE            0x544D4149  /* Intel(R) Active Management Technology signature */

#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2 /* DHCP parsing enabled */

#define E1000_VFTA_ENTRY_SHIFT          0x5
#define E1000_VFTA_ENTRY_MASK           0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F

struct e1000_host_mng_command_header {
    uint8_t command_id;
    uint8_t checksum;
    uint16_t reserved1;
    uint16_t reserved2;
    uint16_t command_length;
};

struct e1000_host_mng_command_info {
    struct e1000_host_mng_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
    uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can length 0..0x658*/
};
#ifdef __BIG_ENDIAN
struct e1000_host_mng_dhcp_cookie {
    uint32_t signature;
    uint16_t vlan_id;
    uint8_t reserved0;
    uint8_t status;
    uint32_t reserved1;
    uint8_t checksum;
    uint8_t reserved3;
    uint16_t reserved2;
};
#else
struct e1000_host_mng_dhcp_cookie {
    uint32_t signature;
    uint8_t status;
    uint8_t reserved0;
    uint16_t vlan_id;
    uint32_t reserved1;
    uint16_t reserved2;
    uint8_t reserved3;
    uint8_t checksum;
};
#endif

int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum);
int32_t e1000_mng_write_cmd_header(struct e1000_hw *hw, struct e1000_host_mng_command_header *hdr);
int32_t e1000_mng_write_commit(struct e1000_hw *hw);

int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t *part_num);
int32_t e1000_read_mac_addr(struct e1000_hw *hw);
int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);

/* Filters (multicast, vlan, receive) */
void e1000_init_rx_addrs(struct e1000_hw *hw);
...
...
@@ -313,7 +409,6 @@ int32_t e1000_led_off(struct e1000_hw *hw);
/* Adaptive IFS Functions */

/* Everything else */
-uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
void e1000_clear_hw_cntrs(struct e1000_hw *hw);
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
...
...
@@ -330,6 +425,19 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active);
void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
void e1000_enable_pciex_master(struct e1000_hw *hw);
int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
void e1000_release_software_semaphore(struct e1000_hw *hw);
int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);

#define E1000_READ_REG_IO(a, reg) \
    e1000_read_reg_io((a), E1000_##reg)
...
...
@@ -369,6 +477,10 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
#define E1000_DEV_ID_82546GB_SERDES      0x107B
#define E1000_DEV_ID_82546GB_PCIE        0x108A
#define E1000_DEV_ID_82547EI             0x1019
#define E1000_DEV_ID_82573E              0x108B
#define E1000_DEV_ID_82573E_IAMT         0x108C

#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
...
@@ -381,6 +493,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
#define E1000_REVISION_0       0
#define E1000_REVISION_1       1
#define E1000_REVISION_2       2
#define E1000_REVISION_3       3

#define SPEED_10    10
#define SPEED_100   100
...
@@ -437,6 +550,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
    E1000_IMS_RXSEQ  | \
    E1000_IMS_LSC)

/* Number of high/low register pairs in the RAR. The RAR (Receive Address
 * Registers) holds the directed and multicast addresses that we monitor. We
 * reserve one of these spots for our directed address, allowing us room for
...
...
@@ -457,14 +571,74 @@ struct e1000_rx_desc {
    uint16_t special;
};

/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
    struct {
        uint64_t buffer_addr;
        uint64_t reserved;
    } read;
    struct {
        struct {
            uint32_t mrq;              /* Multiple Rx Queues */
            union {
                uint32_t rss;          /* RSS Hash */
                struct {
                    uint16_t ip_id;    /* IP id */
                    uint16_t csum;     /* Packet Checksum */
                } csum_ip;
            } hi_dword;
        } lower;
        struct {
            uint32_t status_error;     /* ext status/error */
            uint16_t length;
            uint16_t vlan;             /* VLAN tag */
        } upper;
    } wb;  /* writeback */
};

#define MAX_PS_BUFFERS 4
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
    struct {
        /* one buffer for protocol header(s), three data buffers */
        uint64_t buffer_addr[MAX_PS_BUFFERS];
    } read;
    struct {
        struct {
            uint32_t mrq;              /* Multiple Rx Queues */
            union {
                uint32_t rss;          /* RSS Hash */
                struct {
                    uint16_t ip_id;    /* IP id */
                    uint16_t csum;     /* Packet Checksum */
                } csum_ip;
            } hi_dword;
        } lower;
        struct {
            uint32_t status_error;     /* ext status/error */
            uint16_t length0;          /* length of buffer 0 */
            uint16_t vlan;             /* VLAN tag */
        } middle;
        struct {
            uint16_t header_status;
            uint16_t length[3];        /* length of buffers 1-3 */
        } upper;
        uint64_t reserved;
    } wb; /* writeback */
};

/* Receive Decriptor bit definitions */
#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum caculated */
#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
...
@@ -474,9 +648,20 @@ struct e1000_rx_desc {
#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
-#define E1000_RXD_SPC_PRI_SHIFT 0x000D  /* Priority is in upper 3 of 16 */
+#define E1000_RXD_SPC_PRI_SHIFT 13
#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
-#define E1000_RXD_SPC_CFI_SHIFT 0x000C  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12

#define E1000_RXDEXT_STATERR_CE    0x01000000
#define E1000_RXDEXT_STATERR_SE    0x02000000
#define E1000_RXDEXT_STATERR_SEQ   0x04000000
#define E1000_RXDEXT_STATERR_CXE   0x10000000
#define E1000_RXDEXT_STATERR_TCPE  0x20000000
#define E1000_RXDEXT_STATERR_IPE   0x40000000
#define E1000_RXDEXT_STATERR_RXE   0x80000000

#define E1000_RXDPS_HDRSTAT_HDRSP        0x00008000
#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK  0x000003FF

/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
...
...
@@ -486,6 +671,15 @@ struct e1000_rx_desc {
    E1000_RXD_ERR_CXE |            \
    E1000_RXD_ERR_RXE)

/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
    E1000_RXDEXT_STATERR_CE  |            \
    E1000_RXDEXT_STATERR_SE  |            \
    E1000_RXDEXT_STATERR_SEQ |            \
    E1000_RXDEXT_STATERR_CXE |            \
    E1000_RXDEXT_STATERR_RXE)
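With the shift constants now expressed as plain bit positions (13 and 12), pulling the 802.1Q fields out of the receive descriptor's 16-bit special word is a direct mask-and-shift. The sketch below applies the masks defined above to a made-up sample value, purely as an illustration.

/* Decodes VLAN ID, CFI and priority from a receive descriptor "special"
 * word using the masks/shifts defined above.  The sample value is made up. */
#include <stdio.h>
#include <stdint.h>

#define E1000_RXD_SPC_VLAN_MASK 0x0FFF
#define E1000_RXD_SPC_PRI_MASK  0xE000
#define E1000_RXD_SPC_PRI_SHIFT 13
#define E1000_RXD_SPC_CFI_MASK  0x1000
#define E1000_RXD_SPC_CFI_SHIFT 12

int main(void)
{
    uint16_t special = 0xA064;  /* example: priority 5, CFI 0, VLAN 100 */
    uint16_t vlan = special & E1000_RXD_SPC_VLAN_MASK;
    uint16_t pri  = (special & E1000_RXD_SPC_PRI_MASK) >> E1000_RXD_SPC_PRI_SHIFT;
    uint16_t cfi  = (special & E1000_RXD_SPC_CFI_MASK) >> E1000_RXD_SPC_CFI_SHIFT;

    printf("special=0x%04x -> vlan=%u pri=%u cfi=%u\n", special, vlan, pri, cfi);
    return 0;
}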
/* Transmit Descriptor */
struct e1000_tx_desc {
    uint64_t buffer_addr;       /* Address of the descriptor's data buffer */
...
...
@@ -667,6 +861,7 @@ struct e1000_ffvt_entry {
#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
#define E1000_RCTL     0x00100  /* RX Control - RW */
#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
...
...
@@ -676,9 +871,23 @@ struct e1000_ffvt_entry {
#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
#define E1000_PBS      0x01008  /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
#define E1000_FLASH_UPDATES 1000
#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
#define E1000_FLSWCTL  0x01030  /* FLASH control register */
#define E1000_FLSWDATA 0x01034  /* FLASH data register */
#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
...
...
@@ -688,6 +897,7 @@ struct e1000_ffvt_entry {
#define E1000_RXDCTL   0x02828  /* RX Descriptor Control - RW */
#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
...
...
@@ -703,6 +913,14 @@ struct e1000_ffvt_entry {
#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
...
...
@@ -761,7 +979,17 @@ struct e1000_ffvt_entry {
#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
#define E1000_IAC      0x4100   /* Interrupt Assertion Count */
#define E1000_ICRXPTC  0x4104   /* Interrupt Cause Rx Packet Timer Expire Count */
#define E1000_ICRXATC  0x4108   /* Interrupt Cause Rx Absolute Timer Expire Count */
#define E1000_ICTXPTC  0x410C   /* Interrupt Cause Tx Packet Timer Expire Count */
#define E1000_ICTXATC  0x4110   /* Interrupt Cause Tx Absolute Timer Expire Count */
#define E1000_ICTXQEC  0x4118   /* Interrupt Cause Tx Queue Empty Count */
#define E1000_ICTXQMTC 0x411C   /* Interrupt Cause Tx Queue Minimum Threshold Count */
#define E1000_ICRXDMTC 0x4120   /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
#define E1000_ICRXOC   0x4124   /* Interrupt Cause Receiver Overrun Count */
#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
#define E1000_RA       0x05400  /* Receive Address - RW Array */
#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
...
...
@@ -779,6 +1007,16 @@ struct e1000_ffvt_entry {
#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
#define E1000_GCR      0x05B00  /* PCI-Ex Control */
#define E1000_GSCL_1   0x05B10  /* PCI-Ex Statistic Control #1 */
#define E1000_GSCL_2   0x05B14  /* PCI-Ex Statistic Control #2 */
#define E1000_GSCL_3   0x05B18  /* PCI-Ex Statistic Control #3 */
#define E1000_GSCL_4   0x05B1C  /* PCI-Ex Statistic Control #4 */
#define E1000_FACTPS   0x05B30  /* Function Active and Power State to MNG */
#define E1000_SWSM     0x05B50  /* SW Semaphore */
#define E1000_FWSM     0x05B54  /* FW Semaphore */
#define E1000_FFLT_DBG 0x05F04  /* Debug Register */
#define E1000_HICR     0x08F00  /* Host Interface Control */
/* Register Set (82542)
*
* Some of the 82542 registers are located at different offsets than they are
...
...
@@ -829,6 +1067,18 @@ struct e1000_ffvt_entry {
#define E1000_82542_VFTA 0x00600
#define E1000_82542_LEDCTL E1000_LEDCTL
#define E1000_82542_PBA E1000_PBA
#define E1000_82542_PBS E1000_PBS
#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
#define E1000_82542_EEARBC E1000_EEARBC
#define E1000_82542_FLASHT E1000_FLASHT
#define E1000_82542_EEWR E1000_EEWR
#define E1000_82542_FLSWCTL E1000_FLSWCTL
#define E1000_82542_FLSWDATA E1000_FLSWDATA
#define E1000_82542_FLSWCNT E1000_FLSWCNT
#define E1000_82542_FLOP E1000_FLOP
#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
#define E1000_82542_ERT E1000_ERT
#define E1000_82542_RXDCTL E1000_RXDCTL
#define E1000_82542_RADV E1000_RADV
#define E1000_82542_RSRPD E1000_RSRPD
...
...
@@ -913,6 +1163,38 @@ struct e1000_ffvt_entry {
#define E1000_82542_FFMT E1000_FFMT
#define E1000_82542_FFVT E1000_FFVT
#define E1000_82542_HOST_IF E1000_HOST_IF
#define E1000_82542_IAM E1000_IAM
#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
#define E1000_82542_PSRCTL E1000_PSRCTL
#define E1000_82542_RAID E1000_RAID
#define E1000_82542_TARC0 E1000_TARC0
#define E1000_82542_TDBAL1 E1000_TDBAL1
#define E1000_82542_TDBAH1 E1000_TDBAH1
#define E1000_82542_TDLEN1 E1000_TDLEN1
#define E1000_82542_TDH1 E1000_TDH1
#define E1000_82542_TDT1 E1000_TDT1
#define E1000_82542_TXDCTL1 E1000_TXDCTL1
#define E1000_82542_TARC1 E1000_TARC1
#define E1000_82542_RFCTL E1000_RFCTL
#define E1000_82542_GCR E1000_GCR
#define E1000_82542_GSCL_1 E1000_GSCL_1
#define E1000_82542_GSCL_2 E1000_GSCL_2
#define E1000_82542_GSCL_3 E1000_GSCL_3
#define E1000_82542_GSCL_4 E1000_GSCL_4
#define E1000_82542_FACTPS E1000_FACTPS
#define E1000_82542_SWSM E1000_SWSM
#define E1000_82542_FWSM E1000_FWSM
#define E1000_82542_FFLT_DBG E1000_FFLT_DBG
#define E1000_82542_IAC E1000_IAC
#define E1000_82542_ICRXPTC E1000_ICRXPTC
#define E1000_82542_ICRXATC E1000_ICRXATC
#define E1000_82542_ICTXPTC E1000_ICTXPTC
#define E1000_82542_ICTXATC E1000_ICTXATC
#define E1000_82542_ICTXQEC E1000_ICTXQEC
#define E1000_82542_ICTXQMTC E1000_ICTXQMTC
#define E1000_82542_ICRXDMTC E1000_ICRXDMTC
#define E1000_82542_ICRXOC E1000_ICRXOC
#define E1000_82542_HICR E1000_HICR
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
...
...
@@ -974,11 +1256,21 @@ struct e1000_hw_stats {
	uint64_t bptc;
	uint64_t tsctc;
	uint64_t tsctfc;
	uint64_t iac;
	uint64_t icrxptc;
	uint64_t icrxatc;
	uint64_t ictxptc;
	uint64_t ictxatc;
	uint64_t ictxqec;
	uint64_t ictxqmtc;
	uint64_t icrxdmtc;
	uint64_t icrxoc;
};
/* Structure containing variables used by the shared code (e1000_hw.c) */
struct e1000_hw {
	uint8_t __iomem *hw_addr;
	uint8_t *hw_addr;
	uint8_t *flash_address;
	e1000_mac_type mac_type;
	e1000_phy_type phy_type;
	uint32_t phy_init_script;
...
...
@@ -993,6 +1285,7 @@ struct e1000_hw {
	e1000_ms_type original_master_slave;
	e1000_ffe_config ffe_config_state;
	uint32_t asf_firmware_present;
	uint32_t eeprom_semaphore_present;
	unsigned long io_base;
	uint32_t phy_id;
	uint32_t phy_revision;
...
...
@@ -1009,6 +1302,8 @@ struct e1000_hw {
	uint32_t ledctl_default;
	uint32_t ledctl_mode1;
	uint32_t ledctl_mode2;
	boolean_t tx_pkt_filtering;
	struct e1000_host_mng_dhcp_cookie mng_cookie;
	uint16_t phy_spd_default;
	uint16_t autoneg_advertised;
	uint16_t pci_cmd_word;
...
...
@@ -1047,16 +1342,24 @@ struct e1000_hw {
	boolean_t adaptive_ifs;
	boolean_t ifs_params_forced;
	boolean_t in_ifs_mode;
	boolean_t mng_reg_access_disabled;
};

#define E1000_EEPROM_SWDPIN0   0x0001  /* SWDPIN 0 EEPROM Value */
#define E1000_EEPROM_LED_LOGIC 0x0020  /* Led Logic Word */
#define E1000_EEPROM_RW_REG_DATA   16  /* Offset to data in EEPROM read/write registers */
#define E1000_EEPROM_RW_REG_DONE   2   /* Offset to READ/WRITE done bit */
#define E1000_EEPROM_RW_REG_START  1   /* First bit for telling part to start operation */
#define E1000_EEPROM_RW_ADDR_SHIFT 2   /* Shift to the address bits */
#define E1000_EEPROM_POLL_WRITE    1   /* Flag for polling for write complete */
#define E1000_EEPROM_POLL_READ     0   /* Flag for polling for read complete */
/* Register Bit Masks */
/* Device Control */
#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004  /* Blocks new Master requests */
#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
...
...
@@ -1070,6 +1373,7 @@ struct e1000_hw {
#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
#define E1000_CTRL_D_UD_POLARITY 0x00004000  /* Defined polarity of Dock/Undock indication in SDP[0] */
#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
...
...
@@ -1089,6 +1393,7 @@ struct e1000_hw {
#define E1000_STATUS_FD         0x00000001  /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU         0x00000002  /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK  0x0000000C  /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_0     0x00000000  /* Function 0 */
#define E1000_STATUS_FUNC_1     0x00000004  /* Function 1 */
#define E1000_STATUS_TXOFF      0x00000010  /* transmission paused */
...
...
@@ -1098,6 +1403,8 @@ struct e1000_hw {
#define E1000_STATUS_SPEED_100  0x00000040  /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080  /* Speed 1000Mb/s */
#define E1000_STATUS_ASDV       0x00000300  /* Auto speed detect value */
#define E1000_STATUS_DOCK_CI    0x00000800  /* Change in Dock/Undock state. Clear on write '0'. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000  /* Status of Master requests. */
#define E1000_STATUS_MTXCKOK    0x00000400  /* MTX clock running OK */
#define E1000_STATUS_PCI66      0x00000800  /* In 66Mhz slot */
#define E1000_STATUS_BUS64      0x00001000  /* In 64 bit slot */
...
...
@@ -1128,6 +1435,18 @@ struct e1000_hw {
#ifndef E1000_EEPROM_GRANT_ATTEMPTS
#define E1000_EEPROM_GRANT_ATTEMPTS 1000  /* EEPROM # attempts to gain grant */
#endif
#define E1000_EECD_AUTO_RD      0x00000200  /* EEPROM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800  /* EEprom Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_NVADDS       0x00018000  /* NVM Address Size */
#define E1000_EECD_SELSHAD      0x00020000  /* Select Shadow RAM */
#define E1000_EECD_INITSRAM     0x00040000  /* Initialize Shadow RAM */
#define E1000_EECD_FLUPD        0x00080000  /* Update FLASH */
#define E1000_EECD_AUPDEN       0x00100000  /* Enable Autonomous FLASH update */
#define E1000_EECD_SHADV        0x00200000  /* Shadow RAM Data Valid */
#define E1000_EECD_SEC1VAL      0x00400000  /* Sector One Valid */
#define E1000_STM_OPCODE     0xDB00
#define E1000_HICR_FW_RESET  0xC0
/* EEPROM Read */
#define E1000_EERD_START     0x00000001  /* Start Read */
...
...
@@ -1171,6 +1490,8 @@ struct e1000_hw {
#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
#define E1000_CTRL_EXT_IAME          0x08000000  /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000  /* Clear Interrupt timers after IMS clear */
/* MDI Control */
#define E1000_MDIC_DATA_MASK 0x0000FFFF
...
...
@@ -1187,14 +1508,17 @@ struct e1000_hw {
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
#define E1000_LEDCTL_LED1_MODE_SHIFT 8
#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000
#define E1000_LEDCTL_LED1_IVRT 0x00004000
#define E1000_LEDCTL_LED1_BLINK 0x00008000
#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
#define E1000_LEDCTL_LED2_MODE_SHIFT 16
#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
#define E1000_LEDCTL_LED2_IVRT 0x00400000
#define E1000_LEDCTL_LED2_BLINK 0x00800000
#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
...
...
@@ -1238,6 +1562,10 @@ struct e1000_hw {
#define E1000_ICR_GPI_EN3       0x00004000  /* GP Int 3 */
#define E1000_ICR_TXD_LOW       0x00008000
#define E1000_ICR_SRPD          0x00010000
#define E1000_ICR_ACK           0x00020000  /* Receive Ack frame */
#define E1000_ICR_MNG           0x00040000  /* Manageability event */
#define E1000_ICR_DOCK          0x00080000  /* Dock/Undock */
#define E1000_ICR_INT_ASSERTED  0x80000000  /* If this bit asserted, the driver should claim the interrupt */
/* Interrupt Cause Set */
#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
...
...
@@ -1255,6 +1583,9 @@ struct e1000_hw {
#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
#define E1000_ICS_SRPD      E1000_ICR_SRPD
#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
/* Interrupt Mask Set */
#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
...
...
@@ -1272,6 +1603,9 @@ struct e1000_hw {
#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
#define E1000_IMS_SRPD      E1000_ICR_SRPD
#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
/* Interrupt Mask Clear */
#define E1000_IMC_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
...
...
@@ -1289,6 +1623,9 @@ struct e1000_hw {
#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
#define E1000_IMC_TXD_LOW   E1000_ICR_TXD_LOW
#define E1000_IMC_SRPD      E1000_ICR_SRPD
#define E1000_IMC_ACK       E1000_ICR_ACK       /* Receive Ack frame */
#define E1000_IMC_MNG       E1000_ICR_MNG       /* Manageability event */
#define E1000_IMC_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
/* Receive Control */
#define E1000_RCTL_RST      0x00000001          /* Software reset */
...
...
@@ -1301,6 +1638,8 @@ struct e1000_hw {
#define E1000_RCTL_LBM_MAC        0x00000040  /* MAC loopback mode */
#define E1000_RCTL_LBM_SLP        0x00000080  /* serial link loopback mode */
#define E1000_RCTL_LBM_TCVR       0x000000C0  /* tcvr loopback mode */
#define E1000_RCTL_DTYP_MASK      0x00000C00  /* Descriptor type mask */
#define E1000_RCTL_DTYP_PS        0x00000400  /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF     0x00000000  /* rx desc min threshold size */
#define E1000_RCTL_RDMTS_QUAT     0x00000100  /* rx desc min threshold size */
#define E1000_RCTL_RDMTS_EIGTH    0x00000200  /* rx desc min threshold size */
...
...
@@ -1327,6 +1666,34 @@ struct e1000_hw {
#define E1000_RCTL_PMCF           0x00800000  /* pass MAC control frames */
#define E1000_RCTL_BSEX           0x02000000  /* Buffer size extension */
#define E1000_RCTL_SECRC          0x04000000  /* Strip Ethernet CRC */
#define E1000_RCTL_FLXBUF_MASK    0x78000000  /* Flexible buffer size */
#define E1000_RCTL_FLXBUF_SHIFT   27          /* Flexible buffer shift */

/* Use byte values for the following shift parameters
 * Usage:
 *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
 *                  E1000_PSRCTL_BSIZE0_MASK) |
 *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
 *                  E1000_PSRCTL_BSIZE1_MASK) |
 *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
 *                  E1000_PSRCTL_BSIZE2_MASK) |
 *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
 *                  E1000_PSRCTL_BSIZE3_MASK))
 * where value0 = [128..16256],  default=256
 *       value1 = [1024..64512], default=4096
 *       value2 = [0..64512],    default=4096
 *       value3 = [0..64512],    default=0
 */
#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
#define E1000_PSRCTL_BSIZE3_SHIFT  14           /* Shift _left_ 14 */
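The usage comment above packs four buffer sizes into one 32-bit PSRCTL value. A minimal worked sketch of that arithmetic follows, using the defaults listed in the comment; the ROUNDUP() here is a local stand-in for illustration only, not the driver's E1000_ROUNDUP macro.

/* Hedged sketch: pack the default packet-split buffer sizes into PSRCTL. */
#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, unit) (((x) + (unit) - 1) / (unit) * (unit))

int main(void)
{
	uint32_t psrctl = 0;
	uint32_t value0 = 256, value1 = 4096, value2 = 4096, value3 = 0;

	psrctl |= (ROUNDUP(value0, 128)  >> 7)  & 0x0000007F; /* BSIZE0: 256 >> 7  = 2 */
	psrctl |= (ROUNDUP(value1, 1024) >> 2)  & 0x00003F00; /* BSIZE1: 4096 >> 2 = 0x400 */
	psrctl |= (ROUNDUP(value2, 1024) << 6)  & 0x003F0000; /* BSIZE2: 4096 << 6 = 0x40000 */
	psrctl |= (ROUNDUP(value3, 1024) << 14) & 0x3F000000; /* BSIZE3: 0 */

	printf("PSRCTL = 0x%08X\n", psrctl);  /* 0x00040402 for these defaults */
	return 0;
}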
/* Receive Descriptor */
#define E1000_RDT_DELAY 0x0000ffff  /* Delay timer (1=1024us) */
...
...
@@ -1341,6 +1708,23 @@ struct e1000_hw {
#define E1000_FCRTL_RTL   0x0000FFF8  /* Mask Bits[15:3] for RTL */
#define E1000_FCRTL_XONE  0x80000000  /* Enable XON frame transmission */
/* Header split receive */
#define E1000_RFCTL_ISCSI_DIS 0x00000001
#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
#define E1000_RFCTL_NFSW_DIS 0x00000040
#define E1000_RFCTL_NFSR_DIS 0x00000080
#define E1000_RFCTL_NFS_VER_MASK 0x00000300
#define E1000_RFCTL_NFS_VER_SHIFT 8
#define E1000_RFCTL_IPV6_DIS 0x00000400
#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
#define E1000_RFCTL_ACK_DIS 0x00001000
#define E1000_RFCTL_ACKD_DIS 0x00002000
#define E1000_RFCTL_IPFRSP_DIS 0x00004000
#define E1000_RFCTL_EXTEN 0x00008000
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* Receive Descriptor Control */
#define E1000_RXDCTL_PTHRESH 0x0000003F  /* RXDCTL Prefetch Threshold */
#define E1000_RXDCTL_HTHRESH 0x00003F00  /* RXDCTL Host Threshold */
...
...
@@ -1354,6 +1738,8 @@ struct e1000_hw {
#define E1000_TXDCTL_GRAN     0x01000000  /* TXDCTL Granularity */
#define E1000_TXDCTL_LWTHRESH 0xFE000000  /* TXDCTL Low Threshold */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000  /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_COUNT_DESC 0x00400000  /* Enable the counting of desc. still to be processed. */
/* Transmit Configuration Word */
#define E1000_TXCW_FD         0x00000020  /* TXCW full duplex */
...
...
@@ -1387,12 +1773,16 @@ struct e1000_hw {
#define E1000_TCTL_PBE   0x00800000  /* Packet Burst Enable */
#define E1000_TCTL_RTLC  0x01000000  /* Re-transmit on late collision */
#define E1000_TCTL_NRTU  0x02000000  /* No Re-transmit on underrun */
#define E1000_TCTL_MULR  0x10000000  /* Multiple request support */
/* Receive Checksum Control */
#define E1000_RXCSUM_PCSS_MASK 0x000000FF  /* Packet Checksum Start */
#define E1000_RXCSUM_IPOFL     0x00000100  /* IPv4 checksum offload */
#define E1000_RXCSUM_TUOFL     0x00000200  /* TCP / UDP checksum offload */
#define E1000_RXCSUM_IPV6OFL   0x00000400  /* IPv6 checksum offload */
#define E1000_RXCSUM_IPPCSE    0x00001000  /* IP payload checksum enable */
#define E1000_RXCSUM_PCSD      0x00002000  /* packet checksum disabled */
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
...
...
@@ -1411,6 +1801,7 @@ struct e1000_hw {
#define E1000_WUFC_ARP        0x00000020  /* ARP Request Packet Wakeup Enable */
#define E1000_WUFC_IPV4       0x00000040  /* Directed IPv4 Packet Wakeup Enable */
#define E1000_WUFC_IPV6       0x00000080  /* Directed IPv6 Packet Wakeup Enable */
#define E1000_WUFC_IGNORE_TCO 0x00008000  /* Ignore WakeOn TCO packets */
#define E1000_WUFC_FLX0       0x00010000  /* Flexible Filter 0 Enable */
#define E1000_WUFC_FLX1       0x00020000  /* Flexible Filter 1 Enable */
#define E1000_WUFC_FLX2       0x00040000  /* Flexible Filter 2 Enable */
...
...
@@ -1446,13 +1837,19 @@ struct e1000_hw {
#define E1000_MANC_ARP_EN        0x00002000  /* Enable ARP Request Filtering */
#define E1000_MANC_NEIGHBOR_EN   0x00004000  /* Enable Neighbor Discovery Filtering */
#define E1000_MANC_ARP_RES_EN    0x00008000  /* Enable ARP response Filtering */
#define E1000_MANC_TCO_RESET     0x00010000  /* TCO Reset Occurred */
#define E1000_MANC_RCV_TCO_EN    0x00020000  /* Receive TCO Packets Enabled */
#define E1000_MANC_REPORT_STATUS 0x00040000  /* Status Reporting Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000  /* Block phy resets */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000  /* Enable MAC address filtering */
#define E1000_MANC_EN_MNG2HOST   0x00200000  /* Enable MNG packets to host memory */
#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000  /* Enable IP address filtering */
#define E1000_MANC_EN_XSUM_FILTER 0x00800000  /* Enable checksum filtering */
#define E1000_MANC_BR_EN         0x01000000  /* Enable broadcast filtering */
#define E1000_MANC_SMB_REQ       0x01000000  /* SMBus Request */
#define E1000_MANC_SMB_GNT       0x02000000  /* SMBus Grant */
#define E1000_MANC_SMB_CLK_IN    0x04000000  /* SMBus Clock In */
...
...
@@ -1463,11 +1860,97 @@ struct e1000_hw {
#define E1000_MANC_SMB_DATA_OUT_SHIFT 28  /* SMBus Data Out Shift */
#define E1000_MANC_SMB_CLK_OUT_SHIFT  29  /* SMBus Clock Out Shift */

/* SW Semaphore Register */
#define E1000_SWSM_SMBI     0x00000001  /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI  0x00000002  /* FW Semaphore bit */
#define E1000_SWSM_WMNG     0x00000004  /* Wake MNG Clock */
#define E1000_SWSM_DRV_LOAD 0x00000008  /* Driver Loaded Bit */

/* FW Semaphore Register */
#define E1000_FWSM_MODE_MASK  0x0000000E  /* FW mode */
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_FWSM_FW_VALID   0x00008000  /* FW established a valid mode */

/* FFLT Debug Register */
#define E1000_FFLT_DBG_INVC 0x00100000  /* Invalid /C/ code handling */
typedef enum {
	e1000_mng_mode_none     = 0,
	e1000_mng_mode_asf,
	e1000_mng_mode_pt,
	e1000_mng_mode_ipmi,
	e1000_mng_mode_host_interface_only
} e1000_mng_mode;
/* Host Interface Control Register */
#define E1000_HICR_EN  0x00000001  /* Enable Bit - RO */
#define E1000_HICR_C   0x00000002  /* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_SV  0x00000004  /* Status Validity */
#define E1000_HICR_FWR 0x00000080  /* FW reset. Set by the Host */

/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
#define E1000_HI_MAX_DATA_LENGTH        252  /* Host Interface data length */
#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Number of bytes in range */
#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448  /* Number of dwords in range */
#define E1000_HI_COMMAND_TIMEOUT        500  /* Time in ms to process HI command */
struct e1000_host_command_header {
	uint8_t command_id;
	uint8_t command_length;
	uint8_t command_options;  /* I/F bits for command, status for return */
	uint8_t checksum;
};
struct e1000_host_command_info {
	struct e1000_host_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
	uint8_t command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data can be of length 0..252 */
};
/* Host SMB register #0 */
#define E1000_HSMC0R_CLKIN   0x00000001  /* SMB Clock in */
#define E1000_HSMC0R_DATAIN  0x00000002  /* SMB Data in */
#define E1000_HSMC0R_DATAOUT 0x00000004  /* SMB Data out */
#define E1000_HSMC0R_CLKOUT  0x00000008  /* SMB Clock out */

/* Host SMB register #1 */
#define E1000_HSMC1R_CLKIN   E1000_HSMC0R_CLKIN
#define E1000_HSMC1R_DATAIN  E1000_HSMC0R_DATAIN
#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
#define E1000_HSMC1R_CLKOUT  E1000_HSMC0R_CLKOUT

/* FW Status Register */
#define E1000_FWSTS_FWS_MASK 0x000000FF  /* FW Status */

/* Wake Up Packet Length */
#define E1000_WUPL_LENGTH_MASK 0x0FFF  /* Only the lower 12 bits are valid */

#define E1000_MDALIGN   4096
#define E1000_GCR_BEM32 0x00400000

/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
#define E1000_FACTPS_LAN0_VALID 0x00000004
#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
#define E1000_FACTPS_LAN1_VALID 0x00000100
#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
#define E1000_FACTPS_IDE_ENABLE 0x00004000
#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
#define E1000_FACTPS_SP_ENABLE 0x00100000
#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
#define E1000_FACTPS_IPMI_ENABLE 0x04000000
#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
#define E1000_FACTPS_MNGCG 0x20000000
#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
/* EEPROM Commands - Microwire */
#define EEPROM_READ_OPCODE_MICROWIRE  0x6  /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5  /* EEPROM write opcode */
...
...
@@ -1477,22 +1960,20 @@ struct e1000_hw {
/* EEPROM Commands - SPI */
#define EEPROM_MAX_RETRY_SPI    5000  /* Max wait of 5ms, for RDY signal */
#define EEPROM_READ_OPCODE_SPI  0x3   /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_SPI 0x2   /* EEPROM write opcode */
#define EEPROM_A8_OPCODE_SPI    0x8   /* opcode bit-3 = address bit-8 */
#define EEPROM_WREN_OPCODE_SPI  0x6   /* EEPROM set Write Enable latch */
#define EEPROM_WRDI_OPCODE_SPI  0x4   /* EEPROM reset Write Enable latch */
#define EEPROM_RDSR_OPCODE_SPI  0x5   /* EEPROM read Status register */
#define EEPROM_WRSR_OPCODE_SPI  0x1   /* EEPROM write Status register */
#define EEPROM_READ_OPCODE_SPI  0x03  /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_SPI 0x02  /* EEPROM write opcode */
#define EEPROM_A8_OPCODE_SPI    0x08  /* opcode bit-3 = address bit-8 */
#define EEPROM_WREN_OPCODE_SPI  0x06  /* EEPROM set Write Enable latch */
#define EEPROM_WRDI_OPCODE_SPI  0x04  /* EEPROM reset Write Enable latch */
#define EEPROM_RDSR_OPCODE_SPI  0x05  /* EEPROM read Status register */
#define EEPROM_WRSR_OPCODE_SPI  0x01  /* EEPROM write Status register */
#define EEPROM_ERASE4K_OPCODE_SPI  0x20  /* EEPROM ERASE 4KB */
#define EEPROM_ERASE64K_OPCODE_SPI 0xD8  /* EEPROM ERASE 64KB */
#define EEPROM_ERASE256_OPCODE_SPI 0xDB  /* EEPROM ERASE 256B */
/* EEPROM Size definitions */
#define EEPROM_SIZE_16KB 0x1800
#define EEPROM_SIZE_8KB 0x1400
#define EEPROM_SIZE_4KB 0x1000
#define EEPROM_SIZE_2KB 0x0C00
#define EEPROM_SIZE_1KB 0x0800
#define EEPROM_SIZE_512B 0x0400
#define EEPROM_SIZE_128B 0x0000
#define EEPROM_WORD_SIZE_SHIFT 6
#define EEPROM_SIZE_SHIFT 10
#define EEPROM_SIZE_MASK 0x1C00
/* EEPROM Word Offsets */
...
...
@@ -1606,7 +2087,22 @@ struct e1000_hw {
#define IFS_MIN 40
#define IFS_RATIO 4
/* Extended Configuration Control and Size */
#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002
#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004
#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008
#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000
#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
/* PBA constants */
#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
...
...
@@ -1663,6 +2159,13 @@ struct e1000_hw {
/* Number of milliseconds we wait for auto-negotiation to complete */
#define LINK_UP_TIMEOUT 500
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
#define AUTO_READ_DONE_TIMEOUT 10
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 40
#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
/* The carrier extension symbol, as received by the NIC. */
...
...
@@ -1763,6 +2266,7 @@ struct e1000_hw {
#define IGP01E1000_PHY_LINK_HEALTH     0x13  /* PHY Link Health Register */
#define IGP01E1000_GMII_FIFO           0x14  /* GMII FIFO Register */
#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15  /* PHY Channel Quality Register */
#define IGP02E1000_PHY_POWER_MGMT      0x19
#define IGP01E1000_PHY_PAGE_SELECT     0x1F  /* PHY Page Select Core Register */
/* IGP01E1000 AGC Registers - stores the cable length values*/
...
...
@@ -1771,12 +2275,20 @@ struct e1000_hw {
#define IGP01E1000_PHY_AGC_C 0x1472
#define IGP01E1000_PHY_AGC_D 0x1872
/* IGP02E1000 AGC Registers for cable length values */
#define IGP02E1000_PHY_AGC_A 0x11B1
#define IGP02E1000_PHY_AGC_B 0x12B1
#define IGP02E1000_PHY_AGC_C 0x14B1
#define IGP02E1000_PHY_AGC_D 0x18B1
/* IGP01E1000 DSP Reset Register */
#define IGP01E1000_PHY_DSP_RESET 0x1F33
#define IGP01E1000_PHY_DSP_SET 0x1F71
#define IGP01E1000_PHY_DSP_FFE 0x1F35
#define IGP01E1000_PHY_CHANNEL_NUM 4
#define IGP02E1000_PHY_CHANNEL_NUM 4
#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
...
...
@@ -2060,20 +2572,30 @@ struct e1000_hw {
#define IGP01E1000_MSE_CHANNEL_B 0x0F00
#define IGP01E1000_MSE_CHANNEL_A 0xF000
#define IGP02E1000_PM_SPD     0x0001  /* Smart Power Down */
#define IGP02E1000_PM_D3_LPLU 0x0004  /* Enable LPLU in non-D0a modes */
#define IGP02E1000_PM_D0_LPLU 0x0002  /* Enable LPLU in D0a mode */
/* IGP01E1000 DSP reset macros */
#define DSP_RESET_ENABLE     0x0
#define DSP_RESET_DISABLE    0x2
#define E1000_MAX_DSP_RESETS 10
/* IGP01E1000 AGC Registers */
/* IGP01E1000 & IGP02E1000 AGC Registers */
#define IGP01E1000_AGC_LENGTH_SHIFT 7   /* Coarse - 13:11, Fine - 10:7 */
#define IGP02E1000_AGC_LENGTH_SHIFT 9   /* Coarse - 15:13, Fine - 12:9 */
/* IGP02E1000 AGC Register Length 9-bit mask */
#define IGP02E1000_AGC_LENGTH_MASK  0x7F
/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128
/* The precision of the length is +/- 10 meters */
/* The precision error of the cable length is +/- 10 meters */
#define IGP01E1000_AGC_RANGE 10
#define IGP02E1000_AGC_RANGE 10
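For orientation only, a hedged sketch of how an AGC register value could be turned into an approximate cable length using the IGP02E1000 shift and mask defined above. The real lookup lives in e1000_hw.c and uses per-silicon length tables; the table below is an empty stand-in, not the driver's data.

/* Illustrative only: extract the 7-bit AGC length index (coarse bits 15:13,
 * fine bits 12:9 -> shift right by 9, mask with 0x7F) and map it through a
 * placeholder table. */
#include <stdint.h>

#define IGP02E1000_AGC_LENGTH_SHIFT 9
#define IGP02E1000_AGC_LENGTH_MASK  0x7F

static const uint16_t agc_length_table[128] = { 0 };  /* placeholder values */

static uint16_t agc_to_cable_length(uint16_t agc_reg_value)
{
	uint8_t index = (agc_reg_value >> IGP02E1000_AGC_LENGTH_SHIFT) &
	                IGP02E1000_AGC_LENGTH_MASK;
	return agc_length_table[index];  /* meters, +/- 10 m per the comment above */
}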
/* IGP01E1000 PCS Initialization register */
/* bits 3:6 in the PCS registers stores the channels polarity */
...
...
@@ -2113,6 +2635,8 @@ struct e1000_hw {
#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
#define L1LXT971A_PHY_ID 0x001378E0
/* Miscellaneous PHY bit definitions. */
#define PHY_PREAMBLE 0xFFFFFFFF
...
...
drivers/net/e1000/e1000_main.c
/*******************************************************************************

  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
...
...
@@ -29,33 +29,9 @@
#include "e1000.h"
/* Change Log
* 5.3.12 6/7/04
* - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
* - if_mii support and associated kcompat for older kernels
* - More errlogging support from Jon Mason <jonmason@us.ibm.com>
* - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
*
* 5.7.1 12/16/04
* - Resurrect 82547EI/GI related fix in e1000_intr to avoid deadlocks. This
* fix was removed as it caused system instability. The suspected cause of
* this is the called to e1000_irq_disable in e1000_intr. Inlined the
* required piece of e1000_irq_disable into e1000_intr - Anton Blanchard
* 5.7.0 12/10/04
* - include fix to the condition that determines when to quit NAPI - Robert Olsson
* - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
* 5.6.5 11/01/04
 * - Enabling NETIF_F_SG without checksum offload is illegal - John Mason <jdmason@us.ibm.com>
* 5.6.3 10/26/04
* - Remove redundant initialization - Jamal Hadi
* - Reset buffer_info->dma in tx resource cleanup logic
* 5.6.2 10/12/04
* - Avoid filling tx_ring completely - shemminger@osdl.org
* - Replace schedule_timeout() with msleep()/msleep_interruptible() -
* nacc@us.ibm.com
* - Sparse cleanup - shemminger@osdl.org
* - Fix tx resource cleanup logic
* - LLTX support - ak@suse.de and hadi@cyberus.ca
* 6.0.44+ 2/15/05
* o applied Anton's patch to resolve tx hang in hardware
* o Applied Andrew Mortons patch - e1000 stops working after resume
*/
char e1000_driver_name[] = "e1000";
...
...
@@ -65,7 +41,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "5.7.6-k2"DRIVERNAPI
#define DRV_VERSION "6.0.54-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
...
...
@@ -96,6 +72,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
...
...
@@ -110,6 +87,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x108B),
	INTEL_E1000_ETHERNET_DEVICE(0x108C),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	/* required last entry */
	{0,}
};
...
...
@@ -155,10 +135,14 @@ static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
static int e1000_clean(struct net_device *netdev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
...
...
@@ -286,6 +270,28 @@ e1000_irq_enable(struct e1000_adapter *adapter)
		E1000_WRITE_FLUSH(&adapter->hw);
	}
}
void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
	uint16_t old_vid = adapter->mng_vlan_id;
	if(adapter->vlgrp) {
		if(!adapter->vlgrp->vlan_devices[vid]) {
			if(adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
					(vid != old_vid) &&
					!adapter->vlgrp->vlan_devices[old_vid])
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		}
	}
}
int
e1000_up(struct e1000_adapter *adapter)
...
...
@@ -310,19 +316,33 @@ e1000_up(struct e1000_adapter *adapter)
	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	e1000_alloc_rx_buffers(adapter);
	adapter->alloc_rx_buf(adapter);

#ifdef CONFIG_PCI_MSI
	if(adapter->hw.mac_type > e1000_82547_rev_2) {
		adapter->have_msi = TRUE;
		if((err = pci_enable_msi(adapter->pdev))) {
			DPRINTK(PROBE, ERR,
			        "Unable to allocate MSI interrupt Error: %d\n", err);
			adapter->have_msi = FALSE;
		}
	}
#endif
	if((err = request_irq(adapter->pdev->irq, &e1000_intr,
	                      SA_SHIRQ | SA_SAMPLE_RANDOM,
	                      netdev->name, netdev))) {
		DPRINTK(PROBE, ERR,
		        "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	mod_timer(&adapter->watchdog_timer, jiffies);
	e1000_irq_enable(adapter);

#ifdef CONFIG_E1000_NAPI
	netif_poll_enable(netdev);
#endif
	e1000_irq_enable(adapter);

	return 0;
}
...
...
@@ -333,6 +353,11 @@ e1000_down(struct e1000_adapter *adapter)
	e1000_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
	if(adapter->hw.mac_type > e1000_82547_rev_2 &&
	   adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);
#endif
	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);
...
...
@@ -350,62 +375,93 @@ e1000_down(struct e1000_adapter *adapter)
	e1000_clean_rx_ring(adapter);

	/* If WoL is not enabled
	 * and management mode is not IAMT
	 * Power down the PHY so no link is implied when interface is down */
	if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) {
	if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper &&
	   !e1000_check_mng_mode(&adapter->hw) &&
	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
		uint16_t mii_reg;
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
}

void
e1000_reset(struct e1000_adapter *adapter)
{
	uint32_t pba;
	struct net_device *netdev = adapter->netdev;
	uint32_t pba, manc;
	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
	uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	if(adapter->hw.mac_type < e1000_82547) {
		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
			pba = E1000_PBA_40K;
		else
			pba = E1000_PBA_48K;
	} else {
		if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
			pba = E1000_PBA_22K;
		else
	switch (adapter->hw.mac_type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		pba = E1000_PBA_30K;
		break;
	case e1000_82573:
		pba = E1000_PBA_12K;
		break;
	default:
		pba = E1000_PBA_48K;
		break;
	}

	if((adapter->hw.mac_type != e1000_82573) &&
	   (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
		pba -= 8; /* allocate more FIFO for Tx */
		/* send an XOFF when there is enough space in the
		 * Rx FIFO to hold one extra full size Rx packet
		 */
		fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
		                     ETHERNET_FCS_SIZE + 1;
		fc_low_water_mark = fc_high_water_mark + 8;
	}

	if(adapter->hw.mac_type == e1000_82547) {
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
		atomic_set(&adapter->tx_fifo_stall, 0);
	}

	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings */
	adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
	                            E1000_FC_HIGH_DIFF;
	                            fc_high_water_mark;
	adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
	                           E1000_FC_LOW_DIFF;
	                           fc_low_water_mark;
	adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(&adapter->hw);
	if(adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
	if(e1000_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);
	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	if(adapter->en_mng_pt) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}
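As a concrete illustration of the watermark change above: instead of the fixed E1000_FC_HIGH_DIFF, the XOFF threshold now sits one full received frame below the top of the Rx FIFO. The sketch below is hedged arithmetic only; it assumes the conventional Ethernet values ENET_HEADER_SIZE = 14 and ETHERNET_FCS_SIZE = 4, and that E1000_PBA_BYTES_SHIFT converts the KB-granular PBA value to bytes (a shift of 10), none of which are quoted from this diff.

/* Hedged sketch of the flow-control watermark arithmetic for one example
 * configuration (jumbo MTU of 9000, 40 KB of packet buffer left for Rx). */
#include <stdio.h>

int main(void)
{
	unsigned int pba_kb = 48 - 8;                      /* example: 8 KB moved to Tx */
	unsigned int mtu = 9000;
	unsigned int fc_high_water_mark = mtu + 14 + 4 + 1;        /* 9019 */
	unsigned int fc_low_water_mark  = fc_high_water_mark + 8;  /* 9027 */

	unsigned int fc_high_water = (pba_kb << 10) - fc_high_water_mark;
	unsigned int fc_low_water  = (pba_kb << 10) - fc_low_water_mark;

	printf("high=%u low=%u\n", fc_high_water, fc_low_water);   /* 31941 / 31933 */
	return 0;
}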
/**
...
...
@@ -426,15 +482,13 @@ e1000_probe(struct pci_dev *pdev,
{
struct
net_device
*
netdev
;
struct
e1000_adapter
*
adapter
;
unsigned
long
mmio_start
,
mmio_len
;
uint32_t
swsm
;
static
int
cards_found
=
0
;
unsigned
long
mmio_start
;
int
mmio_len
;
int
pci_using_dac
;
int
i
;
int
err
;
int
i
,
err
,
pci_using_dac
;
uint16_t
eeprom_data
;
uint16_t
eeprom_apme_mask
=
E1000_EEPROM_APME
;
if
((
err
=
pci_enable_device
(
pdev
)))
return
err
;
...
...
@@ -521,6 +575,9 @@ e1000_probe(struct pci_dev *pdev,
	if((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	if((err = e1000_check_phy_reset_block(&adapter->hw)))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	if(adapter->hw.mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
		                   NETIF_F_HW_CSUM |
...
...
@@ -533,6 +590,11 @@ e1000_probe(struct pci_dev *pdev,
	if((adapter->hw.mac_type >= e1000_82544) &&
	   (adapter->hw.mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

#ifdef NETIF_F_TSO_IPV6
	if(adapter->hw.mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
...
...
@@ -540,6 +602,8 @@ e1000_probe(struct pci_dev *pdev,
/* hard_start_xmit is safe against parallel locking */
	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */
...
...
@@ -555,7 +619,7 @@ e1000_probe(struct pci_dev *pdev,
/* copy the MAC address out of the EEPROM */
	if(e1000_read_mac_addr(&adapter->hw))
		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
...
...
@@ -629,6 +693,17 @@ e1000_probe(struct pci_dev *pdev,
	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* Let firmware know the driver has taken over */
	switch(adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
		                swsm | E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev)))
		goto err_register;
...
...
@@ -664,7 +739,7 @@ e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t manc;
	uint32_t manc, swsm;

	flush_scheduled_work();
...
...
@@ -677,8 +752,20 @@ e1000_remove(struct pci_dev *pdev)
		}
	}

	switch(adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
		                swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	unregister_netdev(netdev);

	if(!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	iounmap(adapter->hw.hw_addr);
...
...
@@ -717,6 +804,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
	hw->max_frame_size = netdev->mtu +
	                     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
...
...
@@ -730,7 +818,10 @@ e1000_sw_init(struct e1000_adapter *adapter)
	/* initialize eeprom parameters */
	e1000_init_eeprom_params(hw);
	if(e1000_init_eeprom_params(hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		return -EIO;
	}

	switch(hw->mac_type) {
	default:
...
...
@@ -795,6 +886,11 @@ e1000_open(struct net_device *netdev)
	if((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}
	return E1000_SUCCESS;
...
...
@@ -830,14 +926,18 @@ e1000_close(struct net_device *netdev)
	e1000_free_tx_resources(adapter);
	e1000_free_rx_resources(adapter);

	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}
	return 0;
/**
* e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
* @adapter: address of board private structure
* @
begin
: address of beginning of memory
* @
end: address of end
of memory
* @
start
: address of beginning of memory
* @
len: length
of memory
**/
static
inline
boolean_t
e1000_check_64k_bound
(
struct
e1000_adapter
*
adapter
,
...
...
@@ -846,12 +946,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* first rev 82545 and 82546 need to not allow any memory
	 * write location to cross a 64k boundary due to errata 23 */
	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if(adapter->hw.mac_type == e1000_82545 ||
	   adapter->hw.mac_type == e1000_82546) {
		/* check buffer doesn't cross 64kB */
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}
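The XOR test above works because two addresses lie in the same 64 KB region exactly when every bit from bit 16 upward agrees between the first and the last byte of the buffer. A hedged standalone sketch of that check:

/* Standalone illustration of the errata-23 boundary test used above:
 * a buffer stays inside one 64 KB page iff begin and (end - 1) share all
 * bits from bit 16 upward, i.e. their XOR shifted right by 16 is zero. */
#include <stdio.h>

static int crosses_64k(unsigned long begin, unsigned long len)
{
	unsigned long end = begin + len;
	return (((begin ^ (end - 1)) >> 16) != 0);
}

int main(void)
{
	printf("%d\n", crosses_64k(0x1FF00, 0x100)); /* 0: last byte 0x1FFFF, same page  */
	printf("%d\n", crosses_64k(0x1FF00, 0x101)); /* 1: last byte 0x20000, next page  */
	return 0;
}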
...
...
@@ -876,7 +974,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
	txdr->buffer_info = vmalloc(size);
	if(!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to Allocate Memory for the Transmit descriptor ring\n");
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);
...
...
@@ -889,38 +987,38 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if(!txdr->desc) {
setup_tx_desc_die:
		DPRINTK(PROBE, ERR,
		"Unable to Allocate Memory for the Transmit descriptor ring\n");
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* fix for errata 23, cant cross 64kB boundary */
	/* Fix for errata 23, can't cross 64kB boundary */
	if(!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes at %p\n",
		        txdr->size, txdr->desc);
		/* try again, without freeing the previous */
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
		        "at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* failed allocation, critial failure */
		if(!txdr->desc) {
			/* Failed allocation, critical failure */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if(!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
			                    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to Allocate aligned Memory for the Transmit"
				" descriptor ring\n");
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* free old, move on with the new one since its okay */
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
...
...
@@ -1022,59 +1120,88 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if(!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to Allocate Memory for the Recieve descriptor ring\n");
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if(!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if(!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if(adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if(!rxdr->desc) {
setup_rx_desc_die:
		DPRINTK(PROBE, ERR,
		"Unble to Allocate Memory for the Recieve descriptor ring\n");
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}

	/* fix for errata 23, cant cross 64kB boundary */
	/* Fix for errata 23, can't cross 64kB boundary */
	if(!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes at %p\n",
		        rxdr->size, rxdr->desc);
		/* try again, without freeing the previous */
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
		        "at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* failed allocation, critial failure */
		if(!rxdr->desc) {
			/* Failed allocation, critical failure */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			goto setup_rx_desc_die;
		}

		if(!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
			                    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to Allocate aligned Memory for the"
				" Receive descriptor ring\n");
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			vfree(rxdr->buffer_info);
			kfree(rxdr->ps_page);
			kfree(rxdr->ps_page_dma);
			return -ENOMEM;
		} else {
			/* free old, move on with the new one since its okay */
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
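E1000_ROUNDUP() above rounds the descriptor-ring size up to a 4 KB multiple before the DMA allocation. A minimal sketch of the usual power-of-two round-up it performs follows; the macro itself lives in e1000.h and is not shown in this diff, so treat this as an equivalent illustration, not the driver's definition.

/* Hedged sketch of a power-of-two round-up equivalent to what
 * E1000_ROUNDUP(size, 4096) is used for above. */
#include <stdio.h>

#define ROUNDUP_POW2(i, unit) (((i) + (unit) - 1) & ~((unsigned long)(unit) - 1))

int main(void)
{
	unsigned long count = 80;
	unsigned long desc_len = 16;   /* legacy Rx descriptor size, for illustration */
	unsigned long size = ROUNDUP_POW2(count * desc_len, 4096);
	printf("%lu\n", size);         /* 1280 bytes of descriptors -> 4096 */
	return 0;
}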
...
...
@@ -1087,14 +1214,15 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
}

/**
 * e1000_setup_rctl - configure the receive control register
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl;
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
...
...
@@ -1109,14 +1237,24 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
	else
		rctl &= ~E1000_RCTL_SBP;

	if(adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~(E1000_RCTL_SZ_4096);
	rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE);
	if(adapter->hw.mac_type == e1000_82573) {
		/* We can now specify buffers in 1K increments.
		 * BSIZE and BSEX are ignored in this case. */
		rctl |= adapter->rx_buffer_len << 0x11;
	} else {
		rctl &= ~E1000_RCTL_SZ_4096;
		rctl |= E1000_RCTL_BSEX;
		switch (adapter->rx_buffer_len) {
		case E1000_RXBUFFER_2048:
		default:
			rctl |= E1000_RCTL_SZ_2048;
			rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case E1000_RXBUFFER_4096:
			rctl |= E1000_RCTL_SZ_4096;
...
...
@@ -1128,6 +1266,41 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
			rctl |= E1000_RCTL_SZ_16384;
			break;
		}
	}

#ifdef CONFIG_E1000_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
	                  && (adapter->netdev->mtu
	                      < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
#endif
	if(adapter->rx_ps) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable IPv6 packet split support */
		rfctl |= E1000_RFCTL_IPV6_DIS;
		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;

		psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
		psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
		psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
		psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
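A hedged back-of-the-envelope check of the rx_ps gate above: with PAGE_SIZE = 4096 and rx_ps_bsize0 = 256 (the value assigned earlier in e1000_sw_init in this diff), the cutoff is 3 * 4096 + 256 = 12544, so both a standard 1500-byte MTU and 9000-byte jumbo frames enable packet split on post-82547 parts.

/* Illustration of that gate for a few MTUs; the constants are assumptions
 * stated above, not values read from hardware. */
#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, bsize0 = 256;
	unsigned limit = 3 * page_size + bsize0;   /* 12544 */
	unsigned mtus[3] = { 1500, 9000, 16110 };
	int i;

	for (i = 0; i < 3; i++)
		printf("mtu %u -> rx_ps %d\n", mtus[i], mtus[i] < limit);
	return 0;
}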
...
...
@@ -1143,9 +1316,18 @@ static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t rdlen, rctl, rxcsum;

	if(adapter->rx_ps) {
		rdlen = adapter->rx_ring.count *
		        sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(&adapter->hw, RCTL);
...
...
@@ -1172,13 +1354,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
	E1000_WRITE_REG(&adapter->hw, RDT, 0);

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if((adapter->hw.mac_type >= e1000_82543) &&
	   (adapter->rx_csum == TRUE)) {
	if(adapter->hw.mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		if(adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82573 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if((adapter->hw.mac_type > e1000_82547_rev_2) &&
			   (adapter->rx_ps)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
	}

	if(adapter->hw.mac_type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
...
...
@@ -1210,10 +1406,8 @@ static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                 struct e1000_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;
	if(buffer_info->dma) {
		pci_unmap_page(pdev,
		pci_unmap_page(adapter->pdev,
		               buffer_info->dma,
		               buffer_info->length,
		               PCI_DMA_TODEVICE);
...
...
@@ -1281,6 +1475,10 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	kfree(rx_ring->ps_page);
	rx_ring->ps_page = NULL;
	kfree(rx_ring->ps_page_dma);
	rx_ring->ps_page_dma = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
...
...
@@ -1297,16 +1495,19 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for(i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if(buffer_info->skb) {
			ps_page = &rx_ring->ps_page[i];
			ps_page_dma = &rx_ring->ps_page_dma[i];
			pci_unmap_single(pdev,
			                 buffer_info->dma,
			                 buffer_info->length,
...
...
@@ -1314,11 +1515,25 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;

			for(j = 0; j < PS_PAGE_BUFFERS; j++) {
				if(!ps_page->ps_page[j]) break;
				pci_unmap_single(pdev,
				                 ps_page_dma->ps_page_dma[j],
				                 PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ps_page_dma->ps_page_dma[j] = 0;
				put_page(ps_page->ps_page[j]);
				ps_page->ps_page[j] = NULL;
			}
		}
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page) * rx_ring->count;
	memset(rx_ring->ps_page, 0, size);
	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
	memset(rx_ring->ps_page_dma, 0, size);

	/* Zero out the descriptor ring */
...
...
@@ -1422,15 +1637,15 @@ e1000_set_multi(struct net_device *netdev)
	struct e1000_adapter *adapter = netdev->priv;
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	unsigned long flags;
	uint32_t rctl;
	uint32_t hash_value;
	int i;
	unsigned long flags;

	/* Check for Promiscuous and All Multicast modes */
	spin_lock_irqsave(&adapter->tx_lock, flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = E1000_READ_REG(hw, RCTL);

	if(netdev->flags & IFF_PROMISC) {
...
...
@@ -1556,6 +1771,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
	uint32_t link;

	e1000_check_for_link(&adapter->hw);
	if(adapter->hw.mac_type == e1000_82573) {
		e1000_enable_tx_pkt_filtering(&adapter->hw);
		if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
			e1000_update_mng_vlan(adapter);
	}

	if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
...
...
@@ -1632,7 +1852,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
/* Cause software interrupt to ensure rx ring is cleaned */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period*/
	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;
/* Reset the timer */
...
...
@@ -1642,6 +1862,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
#define E1000_TX_FLAGS_CSUM 0x00000001
#define E1000_TX_FLAGS_VLAN 0x00000002
#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_IPV4 0x00000008
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
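The VLAN mask and shift above pack a 16-bit VLAN tag into the upper half of the 32-bit tx_flags word; a hedged one-off illustration of the packing and unpacking (the tag value is arbitrary sample data):

/* Sketch: pack and unpack a VLAN tag with the masks defined above. */
#include <stdint.h>
#include <stdio.h>

#define E1000_TX_FLAGS_VLAN       0x00000002
#define E1000_TX_FLAGS_VLAN_MASK  0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t vlan_tag = 42;

	tx_flags |= E1000_TX_FLAGS_VLAN;
	tx_flags |= ((uint32_t)vlan_tag << E1000_TX_FLAGS_VLAN_SHIFT);

	uint16_t unpacked = (tx_flags & E1000_TX_FLAGS_VLAN_MASK) >>
	                    E1000_TX_FLAGS_VLAN_SHIFT;
	printf("%u\n", (unsigned)unpacked);  /* 42 */
	return 0;
}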
...
...
@@ -1652,7 +1873,7 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
	struct e1000_context_desc *context_desc;
	unsigned int i;
	uint32_t cmd_length = 0;
	uint16_t ipcse, tucse, mss;
	uint16_t ipcse = 0, tucse, mss;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	int err;
...
...
@@ -1665,23 +1886,37 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
	hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
	mss = skb_shinfo(skb)->tso_size;
	if(skb->protocol == ntohs(ETH_P_IP)) {
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
			                   skb->nh.iph->daddr,
			                   0,
			                   IPPROTO_TCP,
			                   0);
		cmd_length = E1000_TXD_CMD_IP;
		ipcse = skb->h.raw - skb->data - 1;
#ifdef NETIF_F_TSO_IPV6
	} else if(skb->protocol == ntohs(ETH_P_IPV6)) {
		skb->nh.ipv6h->payload_len = 0;
		skb->h.th->check =
			~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
			                 &skb->nh.ipv6h->daddr,
			                 0,
			                 IPPROTO_TCP,
			                 0);
		ipcse = 0;
#endif
	}

	ipcss = skb->nh.raw - skb->data;
	ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
	ipcse = skb->h.raw - skb->data - 1;
	tucss = skb->h.raw - skb->data;
	tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
	tucse = 0;

	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
	               E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
	               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

	i = adapter->tx_ring.next_to_use;
	context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
...
...
@@ -1760,6 +1995,15 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
 		if(unlikely(mss && !nr_frags && size == len && size > 8))
 			size -= 4;
 #endif
+		/* work-around for errata 10 and it applies
+		 * to all controllers in PCI-X mode
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+		            (size > 2015) && count == 0))
+			size = 2015;
+
 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
 		 * terminating buffers within evenly-aligned dwords. */
 		if(unlikely(adapter->pcix_82544 &&
...
@@ -1840,7 +2084,10 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
 	if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
 		             E1000_TXD_CMD_TSE;
-		txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
 	}

 	if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
...
@@ -1915,6 +2162,53 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 	return 0;
 }

+#define MINIMUM_DHCP_PACKET_SIZE 282
+static inline int
+e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	uint16_t length, offset;
+	if(vlan_tx_tag_present(skb)) {
+		if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+			( adapter->hw.mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
+			return 0;
+	}
+	if(htons(ETH_P_IP) == skb->protocol) {
+		const struct iphdr *ip = skb->nh.iph;
+		if(IPPROTO_UDP == ip->protocol) {
+			struct udphdr *udp = (struct udphdr *)(skb->h.uh);
+			if(ntohs(udp->dest) == 67) {
+				offset = (uint8_t *)udp + 8 - skb->data;
+				length = skb->len - offset;
+
+				return e1000_mng_write_dhcp_info(hw,
+						(uint8_t *)udp + 8, length);
+			}
+		}
+	} else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
+		struct ethhdr *eth = (struct ethhdr *) skb->data;
+		if((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((uint8_t *)skb->data+14);
+			if(IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((uint8_t *)ip +
+						(ip->ihl << 2));
+				if(ntohs(udp->dest) == 67) {
+					offset = (uint8_t *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(uint8_t *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
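
The added e1000_transfer_dhcp_info() above hands the DHCP payload (UDP destination port 67) to the management firmware; the payload begins 8 bytes past the UDP header, so its offset in the frame is (uint8_t *)udp + 8 - skb->data. A minimal user-space sketch of that offset arithmetic only, assuming a plain Ethernet/IPv4/UDP frame layout (the 14-byte Ethernet header and the ihl parameter are illustrative assumptions, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical frame: 14-byte Ethernet header, IPv4 header of ip_ihl
     * 32-bit words, then an 8-byte UDP header followed by the DHCP payload. */
    static size_t dhcp_payload_offset(const uint8_t *frame, uint8_t ip_ihl)
    {
    	const uint8_t *ip  = frame + 14;          /* skip Ethernet header */
    	const uint8_t *udp = ip + (ip_ihl << 2);  /* ihl counts 4-byte words */
    	return (size_t)(udp + 8 - frame);         /* skip 8-byte UDP header */
    }

    int main(void)
    {
    	uint8_t frame[64] = {0};
    	/* with ihl = 5 (20-byte IP header): 14 + 20 + 8 = 42 */
    	printf("payload offset = %zu\n", dhcp_payload_offset(frame, 5));
    	return 0;
    }

The length passed to the firmware is then simply skb->len minus that offset, exactly as in the hunk above.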
 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
 static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
...
@@ -1952,7 +2246,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if((mss) || (skb->ip_summed == CHECKSUM_HW))
 		count++;
-	count++;
+	count++;	/* for sentinel desc */
 #else
 	if(skb->ip_summed == CHECKSUM_HW)
 		count++;
...
@@ -1962,6 +2256,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if(adapter->pcix_82544)
 		count++;

+	/* work-around for errata 10 and it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count
+	 */
+	if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
+
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for(f = 0; f < nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
...
@@ -1975,6 +2276,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		local_irq_restore(flags);
 		return NETDEV_TX_LOCKED;
 	}

+	if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
+		e1000_transfer_dhcp_info(adapter, skb);
+
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
...
@@ -2011,6 +2315,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else if(likely(e1000_tx_csum(adapter, skb)))
 		tx_flags |= E1000_TX_FLAGS_CSUM;

+	/* Old method was to assume IPv4 packet by default if TSO was enabled.
+	 * 82573 hardware supports TSO capabilities for IPv6 as well...
+	 * no longer assume, we must. */
+	if(likely(skb->protocol == ntohs(ETH_P_IP)))
+		tx_flags |= E1000_TX_FLAGS_IPV4;
+
 	e1000_tx_queue(adapter,
 		e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
 		tx_flags);
...
@@ -2077,7 +2387,6 @@ static int
 e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct e1000_adapter *adapter = netdev->priv;
-	int old_mtu = adapter->rx_buffer_len;
 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

 	if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
...
@@ -2086,29 +2395,45 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 	}

-	if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
-		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-
-	} else if(adapter->hw.mac_type < e1000_82543) {
-		DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
-		return -EINVAL;
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+	/* might want this to be bigger enum check... */
+	if(adapter->hw.mac_type == e1000_82573 &&
+	   max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+		DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+				    "on 82573\n");
+		return -EINVAL;
+	}
+
+	if(adapter->hw.mac_type > e1000_82547_rev_2) {
+		adapter->rx_buffer_len = max_frame;
+		E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
+	} else {
+		if(unlikely((adapter->hw.mac_type < e1000_82543) &&
+		   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+					    "on 82542\n");
+			return -EINVAL;
+		} else {
+			if(max_frame <= E1000_RXBUFFER_2048) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+			} else if(max_frame <= E1000_RXBUFFER_4096) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+			} else if(max_frame <= E1000_RXBUFFER_8192) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+			} else if(max_frame <= E1000_RXBUFFER_16384) {
+				adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+			}
+		}
+	}

-	if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
+	netdev->mtu = new_mtu;
+
+	if(netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	}

-	netdev->mtu = new_mtu;
 	adapter->hw.max_frame_size = max_frame;

 	return 0;
...
@@ -2199,6 +2524,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
 		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
 	}
+	if(hw->mac_type > e1000_82547_rev_2) {
+		adapter->stats.iac += E1000_READ_REG(hw, IAC);
+		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
+		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+		adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+		adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+		adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+		adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+		adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+		adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+	}

 	/* Fill out the OS statistics structure */
...
@@ -2213,9 +2549,9 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
-		adapter->stats.rlec + adapter->stats.rnbc +
-		adapter->stats.mpc + adapter->stats.cexterr;
-	adapter->net_stats.rx_dropped = adapter->stats.rnbc;
+		adapter->stats.rlec + adapter->stats.mpc +
+		adapter->stats.cexterr;
+	adapter->net_stats.rx_dropped = adapter->stats.mpc;
 	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
 	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
...
@@ -2300,11 +2636,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	 */
 	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
 		atomic_inc(&adapter->irq_sem);
-		E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+		E1000_WRITE_REG(hw, IMC, ~0);
 	}

 	for(i = 0; i < E1000_MAX_INTR; i++)
-		if(unlikely(!e1000_clean_rx_irq(adapter) &
+		if(unlikely(!adapter->clean_rx(adapter) &
 		   !e1000_clean_tx_irq(adapter)))
 			break;
...
@@ -2330,14 +2666,13 @@ e1000_clean(struct net_device *netdev, int *budget)
 	int work_done = 0;

 	tx_cleaned = e1000_clean_tx_irq(adapter);
-	e1000_clean_rx_irq(adapter, &work_done, work_to_do);
+	adapter->clean_rx(adapter, &work_done, work_to_do);

 	*budget -= work_done;
 	netdev->quota -= work_done;

-	/* if no Tx and not enough Rx work done, exit the polling mode */
-	if((!tx_cleaned && (work_done < work_to_do)) ||
+	/* If no Tx and no Rx work done, exit the polling mode */
+	if((!tx_cleaned && (work_done == 0)) ||
 	   !netif_running(netdev)) {
 		netif_rx_complete(netdev);
 		e1000_irq_enable(adapter);
 		return 0;
...
@@ -2367,9 +2702,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	eop_desc = E1000_TX_DESC(*tx_ring, eop);

 	while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-		/* pre-mature writeback of Tx descriptors */
-		/* clear (free buffers and unmap pci_mapping) */
-		/* previous_buffer_info */
+		/* Premature writeback of Tx descriptors clear (free buffers
+		 * and unmap pci_mapping) previous_buffer_info */
 		if (likely(adapter->previous_buffer_info.skb != NULL)) {
 			e1000_unmap_and_free_tx_resource(adapter,
 					&adapter->previous_buffer_info);
...
@@ -2380,26 +2714,30 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);

-			/* pre-mature writeback of Tx descriptors */
-			/* save the cleaning of the this for the */
-			/* next iteration */
+#ifdef NETIF_F_TSO
+			if (!(netdev->features & NETIF_F_TSO)) {
+#endif
+				e1000_unmap_and_free_tx_resource(adapter,
+				                                 buffer_info);
+#ifdef NETIF_F_TSO
+			} else {
 				if (cleaned) {
 					memcpy(&adapter->previous_buffer_info,
 					       buffer_info,
 					       sizeof(struct e1000_buffer));
 					memset(buffer_info, 0,
 					       sizeof(struct e1000_buffer));
 				} else {
 					e1000_unmap_and_free_tx_resource(
 					    adapter, buffer_info);
 				}
+			}
+#endif

 			tx_desc->buffer_addr = 0;
 			tx_desc->lower.data = 0;
 			tx_desc->upper.data = 0;

-			cleaned = (i == eop);
 			if(unlikely(++i == tx_ring->count)) i = 0;
 		}
...
@@ -2416,57 +2754,107 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 			netif_wake_queue(netdev);

 	spin_unlock(&adapter->tx_lock);

 	if(adapter->detect_tx_hung) {
-		/* detect a transmit hang in hardware, this serializes the
+		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = FALSE;
-		if(tx_ring->buffer_info[i].dma &&
-		   time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
-		   && !(E1000_READ_REG(&adapter->hw, STATUS) &
-			E1000_STATUS_TXOFF))
-			/* detected Tx unit hang */
+		if (tx_ring->buffer_info[i].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+		    && !(E1000_READ_REG(&adapter->hw, STATUS) &
+			 E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			i = tx_ring->next_to_clean;
+			eop = tx_ring->buffer_info[i].next_to_watch;
+			eop_desc = E1000_TX_DESC(*tx_ring, eop);
+			DPRINTK(TX_ERR, ERR, "Detected Tx Unit Hang\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  dma                  <%llx>\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				E1000_READ_REG(&adapter->hw, TDH),
+				E1000_READ_REG(&adapter->hw, TDT),
+				tx_ring->next_to_use,
+				i,
+				tx_ring->buffer_info[i].dma,
+				tx_ring->buffer_info[i].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
 			netif_stop_queue(netdev);
+		}
 	}
+#ifdef NETIF_F_TSO
+
+	if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	    time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
+		e1000_unmap_and_free_tx_resource(
+			adapter, &adapter->previous_buffer_info);
+
+#endif
 	return cleaned;
 }

 /**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
- * @rx_desc: receive descriptor
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/

 static inline void
 e1000_rx_checksum(struct e1000_adapter *adapter,
-		  struct e1000_rx_desc *rx_desc,
-		  struct sk_buff *skb)
+		  uint32_t status_err, uint32_t csum,
+		  struct sk_buff *skb)
 {
+	uint16_t status = (uint16_t)status_err;
+	uint8_t errors = (uint8_t)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
 	/* 82543 or newer only */
-	if(unlikely((adapter->hw.mac_type < e1000_82543) ||
-	/* Ignore Checksum bit is set */
-	   (rx_desc->status & E1000_RXD_STAT_IXSM) ||
-	/* TCP Checksum has not been calculated */
-	   (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
-		skb->ip_summed = CHECKSUM_NONE;
-		return;
-	}
-
-	/* At this point we know the hardware did the TCP checksum */
-	/* now look at the TCP checksum error bit */
-	if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
+	if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
 		/* let the stack verify checksum errors */
-		skb->ip_summed = CHECKSUM_NONE;
 		adapter->hw_csum_err++;
-	} else {
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if(adapter->hw.mac_type <= e1000_82547_rev_2) {
+		if(!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if(likely(status & E1000_RXD_STAT_TCPCS)) {
 		/* TCP checksum is good */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		adapter->hw_csum_good++;
+	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		csum = ntohl(csum ^ 0xFFFF);
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_HW;
 	}
+	adapter->hw_csum_good++;
 }
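
For an IP-fragmented UDP packet, the rewritten e1000_rx_checksum() above hands the stack a partial checksum instead of a pass/fail flag; per the in-line comment, the hardware reports the one's complement of the payload sum, so the driver flips it back with csum ^ 0xFFFF and puts it in host order before storing it in skb->csum. A small stand-alone sketch of just that unfolding expression (the sample value is made up for illustration):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Pretend the descriptor reported this complemented 16-bit sum. */
    	uint32_t reported = 0x1234;

    	/* Undo the hardware's one's complement, then convert byte order,
    	 * mirroring csum = ntohl(csum ^ 0xFFFF) in the hunk above. */
    	uint32_t csum = ntohl(reported ^ 0xFFFF);

    	printf("reported=0x%04x unfolded=0x%08x\n",
    	       (unsigned)reported, (unsigned)csum);
    	return 0;
    }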
 /**
- * e1000_clean_rx_irq - Send received data up the network stack
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 **/
...
@@ -2539,8 +2927,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
 		skb_put(skb, length - ETHERNET_FCS_SIZE);

 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter, rx_desc, skb);
+		e1000_rx_checksum(adapter,
+				  (uint32_t)(rx_desc->status) |
+				  ((uint32_t)(rx_desc->errors) << 24),
+				  rx_desc->csum, skb);

 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
 		if(unlikely(adapter->vlgrp &&
...
@@ -2570,16 +2960,142 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 	}

 	rx_ring->next_to_clean = i;
-
-	e1000_alloc_rx_buffers(adapter);
+	adapter->alloc_rx_buf(adapter);

 	return cleaned;
 }

+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+#ifdef CONFIG_E1000_NAPI
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
+                      int work_to_do)
+#else
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
+#endif
+{
+	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+	union e1000_rx_desc_packet_split *rx_desc;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *skb;
+	unsigned int i, j;
+	uint32_t length, staterr;
+	boolean_t cleaned = FALSE;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+	staterr = rx_desc->wb.middle.status_error;
+
+	while(staterr & E1000_RXD_STAT_DD) {
+		buffer_info = &rx_ring->buffer_info[i];
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+#ifdef CONFIG_E1000_NAPI
+		if(unlikely(*work_done >= work_to_do))
+			break;
+		(*work_done)++;
+#endif
+		cleaned = TRUE;
+		pci_unmap_single(pdev, buffer_info->dma,
+				 buffer_info->length,
+				 PCI_DMA_FROMDEVICE);
+
+		skb = buffer_info->skb;
+
+		if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+			E1000_DBG("%s: Packet Split buffers didn't pick up"
+				  " the full packet\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+		if(unlikely(!length)) {
+			E1000_DBG("%s: Last part of the packet spanning"
+				  " multiple descriptors\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		/* Good Receive */
+		skb_put(skb, length);
+
+		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+			if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+				break;
+
+			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
+					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			ps_page_dma->ps_page_dma[j] = 0;
+			skb_shinfo(skb)->frags[j].page =
+				ps_page->ps_page[j];
+			ps_page->ps_page[j] = NULL;
+			skb_shinfo(skb)->frags[j].page_offset = 0;
+			skb_shinfo(skb)->frags[j].size = length;
+			skb_shinfo(skb)->nr_frags++;
+			skb->len += length;
+			skb->data_len += length;
+		}
+
+		e1000_rx_checksum(adapter, staterr,
+				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		skb->protocol = eth_type_trans(skb, netdev);
+
+#ifdef HAVE_RX_ZERO_COPY
+		if(likely(rx_desc->wb.upper.header_status &
+			  E1000_RXDPS_HDRSTAT_HDRSP))
+			skb_shinfo(skb)->zero_copy = TRUE;
+#endif
+#ifdef CONFIG_E1000_NAPI
+		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+				le16_to_cpu(rx_desc->wb.middle.vlan &
+					    E1000_RXD_SPC_VLAN_MASK));
+		} else {
+			netif_receive_skb(skb);
+		}
+#else /* CONFIG_E1000_NAPI */
+		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+			vlan_hwaccel_rx(skb, adapter->vlgrp,
+				le16_to_cpu(rx_desc->wb.middle.vlan &
+					    E1000_RXD_SPC_VLAN_MASK));
+		} else {
+			netif_rx(skb);
+		}
+#endif /* CONFIG_E1000_NAPI */
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.middle.status_error &= ~0xFF;
+		buffer_info->skb = NULL;
+		if(unlikely(++i == rx_ring->count)) i = 0;
+
+		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+		staterr = rx_desc->wb.middle.status_error;
+	}
+	rx_ring->next_to_clean = i;
+	adapter->alloc_rx_buf(adapter);
+
+	return cleaned;
+}
+
 /**
- * e1000_alloc_rx_buffers - Replace used receive buffers
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
...
@@ -2592,43 +3108,43 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 	struct e1000_rx_desc *rx_desc;
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
-	unsigned int i, bufsz;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];

 	while(!buffer_info->skb) {
-		bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
-
 		skb = dev_alloc_skb(bufsz);
+
 		if(unlikely(!skb)) {
 			/* Better luck next round */
 			break;
 		}

-		/* fix for errata 23, cant cross 64kB boundary */
+		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			DPRINTK(RX_ERR, ERR,
-				"skb align check failed: %u bytes at %p\n",
-				bufsz, skb->data);
-			/* try again, without freeing the previous */
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
 			skb = dev_alloc_skb(bufsz);
+			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
 				break;
 			}
+
 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 				/* give up */
 				dev_kfree_skb(skb);
 				dev_kfree_skb(oldskb);
 				break; /* while !buffer_info->skb */
 			} else {
-				/* move on with the new one */
+				/* Use new allocation */
 				dev_kfree_skb(oldskb);
 			}
 		}
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
...
@@ -2644,25 +3160,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 				  adapter->rx_buffer_len,
 				  PCI_DMA_FROMDEVICE);

-		/* fix for errata 23, cant cross 64kB boundary */
+		/* Fix for errata 23, can't cross 64kB boundary */
 		if(!e1000_check_64k_bound(adapter,
 					  (void *)(unsigned long)buffer_info->dma,
 					  adapter->rx_buffer_len)) {
-			DPRINTK(RX_ERR, ERR,
-				"dma align check failed: %u bytes at %ld\n",
-				adapter->rx_buffer_len,
-				(unsigned long)buffer_info->dma);
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;

-			pci_unmap_single(pdev,
-					 buffer_info->dma,
+			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);

 			break; /* while !buffer_info->skb */
 		}
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
...
@@ -2672,7 +3186,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 		 * applicable for weak-ordered memory model archs,
 		 * such as IA-64). */
 		wmb();
-
 		E1000_WRITE_REG(&adapter->hw, RDT, i);
 	}
...
@@ -2683,6 +3196,95 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 	rx_ring->next_to_use = i;
 }

+/**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+
+static void
+e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
+{
+	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	union e1000_rx_desc_packet_split *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *skb;
+	unsigned int i, j;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+	ps_page = &rx_ring->ps_page[i];
+	ps_page_dma = &rx_ring->ps_page_dma[i];
+
+	while(!buffer_info->skb) {
+		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+
+		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+			if (unlikely(!ps_page->ps_page[j])) {
+				ps_page->ps_page[j] =
+					alloc_page(GFP_ATOMIC);
+				if (unlikely(!ps_page->ps_page[j]))
+					goto no_buffers;
+				ps_page_dma->ps_page_dma[j] =
+					pci_map_page(pdev,
+						     ps_page->ps_page[j],
+						     0, PAGE_SIZE,
+						     PCI_DMA_FROMDEVICE);
+			}
+			/* Refresh the desc even if buffer_addrs didn't
+			 * change because each write-back erases this info.
+			 */
+			rx_desc->read.buffer_addr[j+1] =
+				cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+		}
+
+		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+
+		if(unlikely(!skb))
+			break;
+
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		skb->dev = netdev;
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_ps_bsize0;
+		buffer_info->dma = pci_map_single(pdev, skb->data,
+						  adapter->rx_ps_bsize0,
+						  PCI_DMA_FROMDEVICE);
+
+		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+
+		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
+			/* Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch.  (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64). */
+			wmb();
+			/* Hardware increments by 16 bytes, but packet split
+			 * descriptors are 32 bytes...so we increment tail
+			 * twice as much.
+			 */
+			E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
+		}
+
+		if(unlikely(++i == rx_ring->count)) i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+	}
+
+no_buffers:
+	rx_ring->next_to_use = i;
+}
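
The tail write at the end of e1000_alloc_rx_buffers_ps() above uses i << 1 because, as the in-line comment says, the hardware's head/tail registers advance in 16-byte units while a packet-split descriptor is 32 bytes. A tiny stand-alone sketch of that index conversion (the two constants are just the sizes named in the comment):

    #include <stdio.h>

    /* Convert a software ring index into the tail value when each
     * descriptor is twice the hardware's 16-byte unit. */
    static unsigned int ps_tail_value(unsigned int index)
    {
    	const unsigned int desc_size = 32, hw_unit = 16;
    	return index * (desc_size / hw_unit);   /* same as index << 1 */
    }

    int main(void)
    {
    	for (unsigned int i = 0; i < 4; i++)
    		printf("index %u -> tail %u\n", i, ps_tail_value(i));
    	return 0;
    }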
 /**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter:
...
@@ -2856,9 +3458,10 @@ void
 e1000_pci_set_mwi(struct e1000_hw *hw)
 {
 	struct e1000_adapter *adapter = hw->back;
-	int ret;
-
-	ret = pci_set_mwi(adapter->pdev);
-	if(ret)
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if(ret_val)
 		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
 }

 void
...
@@ -2917,6 +3520,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		rctl |= E1000_RCTL_VFE;
 		rctl &= ~E1000_RCTL_CFIEN;
 		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+		e1000_update_mng_vlan(adapter);
 	} else {
 		/* disable VLAN tag insert/strip */
 		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
...
@@ -2927,6 +3531,10 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		rctl = E1000_READ_REG(&adapter->hw, RCTL);
 		rctl &= ~E1000_RCTL_VFE;
 		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+		if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
+			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+		}
 	}

 	e1000_irq_enable(adapter);
...
@@ -2937,7 +3545,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 {
 	struct e1000_adapter *adapter = netdev->priv;
 	uint32_t vfta, index;
+
+	if((adapter->hw.mng_cookie.status &
+	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	   (vid == adapter->mng_vlan_id))
+		return;
 	/* add VID to filter table */
 	index = (vid >> 5) & 0x7F;
 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
...
@@ -2958,6 +3569,10 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 	e1000_irq_enable(adapter);

+	if((adapter->hw.mng_cookie.status &
+	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	   (vid == adapter->mng_vlan_id))
+		return;
 	/* remove VID from filter table */
 	index = (vid >> 5) & 0x7F;
 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
...
@@ -3004,8 +3619,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		DPRINTK(PROBE, ERR,
-			"Unsupported Speed/Duplexity configuration\n");
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
...
@@ -3033,7 +3647,7 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev->priv;
-	uint32_t ctrl, ctrl_ext, rctl, manc, status;
+	uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
 	uint32_t wufc = adapter->wol;

 	netif_device_detach(netdev);
...
@@ -3075,6 +3689,9 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
 		}

+		/* Allow time for pending master requests to run */
+		e1000_disable_pciex_master(&adapter->hw);
+
 		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
 		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
 		pci_enable_wake(pdev, 3, 1);
...
@@ -3099,6 +3716,16 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
 		}
 	}

+	switch(adapter->hw.mac_type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+
 	pci_disable_device(pdev);

 	state = (state > 0) ? 3 : 0;
...
@@ -3113,12 +3740,11 @@ e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev->priv;
-	uint32_t manc, ret;
+	uint32_t manc, ret, swsm;

 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
 	ret = pci_enable_device(pdev);
-	if (pdev->is_busmaster)
-		pci_set_master(pdev);
+	pci_set_master(pdev);

 	pci_enable_wake(pdev, 3, 0);
...
@@ -3139,10 +3765,19 @@ e1000_resume(struct pci_dev *pdev)
 		E1000_WRITE_REG(&adapter->hw, MANC, manc);
 	}

+	switch(adapter->hw.mac_type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 #endif
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
 * Polling 'interrupt' - used by things like netconsole to send skbs
...
@@ -3150,7 +3785,7 @@ e1000_resume(struct pci_dev *pdev)
 * the interrupt routine is executing.
 */
 static void
-e1000_netpoll (struct net_device *netdev)
+e1000_netpoll(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev->priv;
 	disable_irq(adapter->pdev->irq);
...
drivers/net/e1000/e1000_osdep.h
View file @ 1b981021

 /*******************************************************************************

-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
...
@@ -42,7 +42,12 @@
 #include <linux/sched.h>

 #ifndef msec_delay
-#define msec_delay(x)	msleep(x)
+#define msec_delay(x) do { if(in_interrupt()) { \
+		/* Don't mdelay in interrupt context! */ \
+		BUG(); \
+	} else { \
+		msleep(x); \
+	} } while(0)

 /* Some workarounds require millisecond delays and are run during interrupt
 * context.  Most notably, when establishing link, the phy may need tweaking
...
@@ -96,6 +101,29 @@ typedef enum {
     (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
     ((offset) << 2)))

+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        (offset)))
+
 #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)

 #endif /* _E1000_OSDEP_H_ */
drivers/net/e1000/e1000_param.c
View file @ 1b981021

 /*******************************************************************************

-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
...
@@ -478,7 +478,6 @@ e1000_check_options(struct e1000_adapter *adapter)
 			DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
				opt.name);
 			break;
-		case -1:
 		default:
 			e1000_validate_option(&adapter->itr, &opt, adapter);
...
drivers/net/ixgb/ixgb.h
View file @ 1b981021
...
@@ -110,7 +110,7 @@ struct ixgb_adapter;
 #define IXGB_TX_QUEUE_WAKE 16

 /* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IXGB_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define IXGB_RX_BUFFER_WRITE	4	/* Must be power of 2 */

 /* only works for sizes that are powers of 2 */
 #define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
...
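
The IXGB_ROUNDUP macro shown in this hunk rounds a value up to the next multiple of a power-of-two size by adding size-1 and masking off the low bits; it is the same trick the E1000_ROUNDUP call uses for the 1024-byte buffer rounding earlier in this merge. A quick stand-alone check of that arithmetic, with sample frame sizes chosen only for illustration:

    #include <stdio.h>

    /* Round i up to a multiple of size; only valid when size is a power of 2. */
    #define ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

    int main(void)
    {
    	unsigned int frames[] = { 1500, 2048, 9000 };

    	for (int k = 0; k < 3; k++) {
    		unsigned int v = frames[k];
    		ROUNDUP(v, 1024);
    		printf("%u -> %u\n", frames[k], v);   /* 2048, 2048, 9216 */
    	}
    	return 0;
    }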
drivers/net/ixgb/ixgb_ee.c
View file @ 1b981021
...
@@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
 		ixgb_cleanup_eeprom(hw);

 		/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
-		ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
+		ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);

 		return;
 	}
...
@@ -483,7 +483,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
 		DEBUGOUT("ixgb_ee: Checksum invalid.\n");
 		/* clear the init_ctrl_reg_1 to signify that the cache is
 		 * invalidated */
-		ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
+		ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
 		return (FALSE);
 	}
...
@@ -579,7 +579,7 @@ ixgb_get_ee_compatibility(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->compatibility);
+		return (le16_to_cpu(ee_map->compatibility));

 	return (0);
 }
...
@@ -616,7 +616,7 @@ ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->init_ctrl_reg_1);
+		return (le16_to_cpu(ee_map->init_ctrl_reg_1));

 	return (0);
 }
...
@@ -635,7 +635,7 @@ ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->init_ctrl_reg_2);
+		return (le16_to_cpu(ee_map->init_ctrl_reg_2));

 	return (0);
 }
...
@@ -654,7 +654,7 @@ ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->subsystem_id);
+		return (le16_to_cpu(ee_map->subsystem_id));

 	return (0);
 }
...
@@ -673,7 +673,7 @@ ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->subvendor_id);
+		return (le16_to_cpu(ee_map->subvendor_id));

 	return (0);
 }
...
@@ -692,7 +692,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->device_id);
+		return (le16_to_cpu(ee_map->device_id));

 	return (0);
 }
...
@@ -711,7 +711,7 @@ ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->vendor_id);
+		return (le16_to_cpu(ee_map->vendor_id));

 	return (0);
 }
...
@@ -730,7 +730,7 @@ ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->swdpins_reg);
+		return (le16_to_cpu(ee_map->swdpins_reg));

 	return (0);
 }
...
@@ -749,7 +749,7 @@ ixgb_get_ee_d3_power(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->d3_power);
+		return (le16_to_cpu(ee_map->d3_power));

 	return (0);
 }
...
@@ -768,7 +768,7 @@ ixgb_get_ee_d0_power(struct ixgb_hw *hw)
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;

 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (ee_map->d0_power);
+		return (le16_to_cpu(ee_map->d0_power));

 	return (0);
 }
drivers/net/ixgb/ixgb_ethtool.c
View file @ 1b981021
...
@@ -252,7 +252,9 @@ ixgb_get_regs(struct net_device *netdev,
 	uint32_t *reg_start = reg;
 	uint8_t i;

-	regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id;
+	/* the 1 (one) below indicates an attempt at versioning, if the
+	 * interface in ethtool or the driver this 1 should be incremented */
+	regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;

 	/* General Registers */
 	*reg++ = IXGB_READ_REG(hw, CTRL0);	/*   0 */
...
drivers/net/ixgb/ixgb_main.c
View file @ 1b981021
...
@@ -47,7 +47,7 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
+char ixgb_driver_version[] = "1.0.95-k2"DRIVERNAPI;
 char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";

 /* ixgb_pci_tbl - PCI Device ID Table
...
@@ -103,6 +103,7 @@ static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
 static int ixgb_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
+
 #ifdef CONFIG_IXGB_NAPI
 static int ixgb_clean(struct net_device *netdev, int *budget);
 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
...
@@ -120,21 +121,11 @@ static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
-			      void *ptr);
-static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);

 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* for netdump / net console */
 static void ixgb_netpoll(struct net_device *dev);
 #endif

-struct notifier_block ixgb_notifier_reboot = {
-	.notifier_call	= ixgb_notify_reboot,
-	.next		= NULL,
-	.priority	= 0
-};
-
 /* Exported from other modules */

 extern void ixgb_check_options(struct ixgb_adapter *adapter);
...
@@ -144,9 +135,6 @@ static struct pci_driver ixgb_driver = {
 	.id_table = ixgb_pci_tbl,
 	.probe    = ixgb_probe,
 	.remove   = __devexit_p(ixgb_remove),
-	/* Power Managment Hooks */
-	.suspend  = NULL,
-	.resume   = NULL
 };

 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
...
@@ -169,17 +157,12 @@ MODULE_LICENSE("GPL");
 static int __init
 ixgb_init_module(void)
 {
-	int ret;
 	printk(KERN_INFO "%s - version %s\n",
 	       ixgb_driver_string, ixgb_driver_version);

 	printk(KERN_INFO "%s\n", ixgb_copyright);

-	ret = pci_module_init(&ixgb_driver);
-	if(ret >= 0) {
-		register_reboot_notifier(&ixgb_notifier_reboot);
-	}
-	return ret;
+	return pci_module_init(&ixgb_driver);
 }

 module_init(ixgb_init_module);
...
@@ -194,7 +177,6 @@ module_init(ixgb_init_module);
 static void __exit
 ixgb_exit_module(void)
 {
-	unregister_reboot_notifier(&ixgb_notifier_reboot);
 	pci_unregister_driver(&ixgb_driver);
 }
...
@@ -225,7 +207,7 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
 	if(atomic_dec_and_test(&adapter->irq_sem)) {
 		IXGB_WRITE_REG(&adapter->hw, IMS,
 			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
-			       IXGB_INT_RXO | IXGB_INT_LSC);
+			       IXGB_INT_LSC);
 		IXGB_WRITE_FLUSH(&adapter->hw);
 	}
 }
...
@@ -1209,10 +1191,10 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 					| IXGB_CONTEXT_DESC_CMD_TSE
 					| IXGB_CONTEXT_DESC_CMD_IP
 					| IXGB_CONTEXT_DESC_CMD_TCP
-					| IXGB_CONTEXT_DESC_CMD_RS
 					| IXGB_CONTEXT_DESC_CMD_IDE
 					| (skb->len - (hdr_len)));

 	if(++i == adapter->tx_ring.count) i = 0;
 	adapter->tx_ring.next_to_use = i;
...
@@ -1247,7 +1229,6 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	context_desc->mss = 0;
 	context_desc->cmd_type_len =
 		cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
-			    | IXGB_TX_DESC_CMD_RS
 			    | IXGB_TX_DESC_CMD_IDE);

 	if(++i == adapter->tx_ring.count) i = 0;
...
@@ -1273,6 +1254,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
+
 	len -= skb->data_len;

 	i = tx_ring->next_to_use;
...
@@ -1526,14 +1508,33 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
 void
 ixgb_update_stats(struct ixgb_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
+
+	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
+	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
+		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
+		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
+		u64 bcast = ((u64)bcast_h << 32) | bcast_l;
+
+		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
+		/* fix up multicast stats by removing broadcasts */
+		multi -= bcast;
+
+		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
+		adapter->stats.mprch += (multi >> 32);
+		adapter->stats.bprcl += bcast_l;
+		adapter->stats.bprch += bcast_h;
+	} else {
+		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
+		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
+		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
+		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
+	}
 	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
 	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
 	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
 	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
-	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
-	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
-	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
-	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
 	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
 	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
 	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
...
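
The ixgb_update_stats() change above corrects over-counted multicast statistics in promiscuous/all-multi mode: each hardware counter is split into low and high 32-bit registers, so the driver assembles a 64-bit value, subtracts the broadcast count from the multicast count, then splits the result back into the two 32-bit software counters. A self-contained sketch of just that arithmetic, with made-up register snapshots:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Made-up low/high register halves of each counter. */
    	uint32_t mprcl = 1000, mprch = 0;   /* multicast, includes broadcasts */
    	uint32_t bprcl = 250,  bprch = 0;   /* broadcast */

    	uint64_t multi = ((uint64_t)mprch << 32) | mprcl;
    	uint64_t bcast = ((uint64_t)bprch << 32) | bprcl;

    	/* fix up multicast stats by removing broadcasts */
    	multi -= bcast;

    	printf("mprcl += %" PRIu64 ", mprch += %" PRIu64 "\n",
    	       multi & 0xFFFFFFFF, multi >> 32);
    	return 0;
    }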
@@ -1823,7 +1824,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	struct ixgb_rx_desc *rx_desc, *next_rxd;
 	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
-	struct sk_buff *skb, *next_skb;
 	uint32_t length;
 	unsigned int i, j;
 	boolean_t cleaned = FALSE;
...
@@ -1833,6 +1833,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 	buffer_info = &rx_ring->buffer_info[i];

 	while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
+		struct sk_buff *skb, *next_skb;
+		u8 status;

 #ifdef CONFIG_IXGB_NAPI
 		if(*work_done >= work_to_do)
...
@@ -1840,7 +1842,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		(*work_done)++;
 #endif

+		status = rx_desc->status;
 		skb = buffer_info->skb;
+
 		prefetch(skb->data);

 		if(++i == rx_ring->count) i = 0;
...
@@ -1855,7 +1859,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		next_skb = next_buffer->skb;
 		prefetch(next_skb);

-
 		cleaned = TRUE;

 		pci_unmap_single(pdev,
...
@@ -1865,7 +1868,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		length = le16_to_cpu(rx_desc->length);

-		if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
+		if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

 			/* All receives must fit into a single buffer */
...
@@ -1873,12 +1876,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 				  "length<%x>\n", length);

 			dev_kfree_skb_irq(skb);
-			rx_desc->status = 0;
-			buffer_info->skb = NULL;
-
-			rx_desc = next_rxd;
-			buffer_info = next_buffer;
-			continue;
+			goto rxdesc_done;
 		}

 		if(unlikely(rx_desc->errors
...
@@ -1887,12 +1885,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 			     IXGB_RX_DESC_ERRORS_RXE))) {

 			dev_kfree_skb_irq(skb);
-			rx_desc->status = 0;
-			buffer_info->skb = NULL;
-
-			rx_desc = next_rxd;
-			buffer_info = next_buffer;
-			continue;
+			goto rxdesc_done;
 		}

 		/* Good Receive */
...
@@ -1903,7 +1896,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_IXGB_NAPI
-		if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
+		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->special) &
 					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
...
@@ -1911,7 +1904,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_IXGB_NAPI */
-		if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
+		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->special) &
 					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
...
@@ -1921,9 +1914,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 #endif /* CONFIG_IXGB_NAPI */
 		netdev->last_rx = jiffies;

+rxdesc_done:
+		/* clean up descriptor, might be written over by hw */
 		rx_desc->status = 0;
 		buffer_info->skb = NULL;

+		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
 	}
...
@@ -1959,8 +1955,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)

 	num_group_tail_writes = IXGB_RX_BUFFER_WRITE;

-	/* leave one descriptor unused */
-	while(--cleancount > 0) {
+	/* leave three descriptors unused */
+	while(--cleancount > 2) {
 		rx_desc = IXGB_RX_DESC(*rx_ring, i);

 		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
...
@@ -1987,6 +1983,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 				   PCI_DMA_FROMDEVICE);

 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+		/* guarantee DD bit not set now before h/w gets descriptor
+		 * this is the rest of the workaround for h/w double
+		 * writeback. */
+		rx_desc->status = 0;

 		if((i & ~(num_group_tail_writes - 1)) == i) {
 			/* Force memory writes to complete before letting h/w
...
@@ -2099,54 +2099,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 	}
 }

-/**
- * ixgb_notify_reboot - handles OS notification of reboot event.
- * @param nb notifier block, unused
- * @param event Event being passed to driver to act upon
- * @param p A pointer to our net device
- **/
-static int
-ixgb_notify_reboot(struct notifier_block *nb,
-		   unsigned long event,
-		   void *p)
-{
-	struct pci_dev *pdev = NULL;
-
-	switch(event) {
-	case SYS_DOWN:
-	case SYS_HALT:
-	case SYS_POWER_OFF:
-		while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
-			if(pci_dev_driver(pdev) == &ixgb_driver)
-				ixgb_suspend(pdev, 3);
-		}
-	}
-	return NOTIFY_DONE;
-}
-
-/**
- * ixgb_suspend - driver suspend function called from notify.
- * @param pdev pci driver structure used for passing to
- * @param state power state to enter
- **/
-static int
-ixgb_suspend(struct pci_dev *pdev, uint32_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgb_adapter *adapter = netdev->priv;
-
-	netif_device_detach(netdev);
-
-	if(netif_running(netdev))
-		ixgb_down(adapter, TRUE);
-
-	pci_save_state(pdev);
-
-	state = (state > 0) ? 3 : 0;
-	pci_set_power_state(pdev, state);
-	msec_delay(200);
-
-	return 0;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
 * Polling 'interrupt' - used by things like netconsole to send skbs
...
@@ -2157,6 +2109,7 @@ ixgb_suspend(struct pci_dev *pdev, uint32_t state)
 static void ixgb_netpoll(struct net_device *dev)
 {
 	struct ixgb_adapter *adapter = dev->priv;
+
 	disable_irq(adapter->pdev->irq);
 	ixgb_intr(adapter->pdev->irq, dev, NULL);
 	enable_irq(adapter->pdev->irq);
...
drivers/net/ixgb/ixgb_osdep.h
View file @ 1b981021
...
@@ -45,8 +45,7 @@
 		/* Don't mdelay in interrupt context! */ \
 		BUG(); \
 	} else { \
-		set_current_state(TASK_UNINTERRUPTIBLE); \
-		schedule_timeout((x * HZ)/1000 + 2); \
+		msleep(x); \
 	} } while(0)
 #endif
...
drivers/net/pcnet32.c
View file @ 1b981021
...
@@ -22,8 +22,8 @@
 *************************************************************************/

 #define DRV_NAME	"pcnet32"
-#define DRV_VERSION	"1.30i"
-#define DRV_RELDATE	"06.28.2004"
+#define DRV_VERSION	"1.30j"
+#define DRV_RELDATE	"29.04.2005"
 #define PFX		DRV_NAME ": "

 static const char *version =
...
@@ -256,6 +256,7 @@ static int homepna[MAX_UNITS];
 *	   homepna for selecting HomePNA mode for PCNet/Home 79C978.
 * v1.30h  24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
 * v1.30i  28 Jun 2004 Don Fry change to use module_param.
+ * v1.30j  29 Apr 2005 Don Fry fix skb/map leak with loopback test.
 */
...
@@ -395,6 +396,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev);
 static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr);
+static void pcnet32_purge_tx_ring(struct net_device *dev);

 enum pci_flags_bit {
     PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
...
@@ -785,6 +787,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
     }

 clean_up:
+    pcnet32_purge_tx_ring(dev);
     x = a->read_csr(ioaddr, 15) & 0xFFFF;
     a->write_csr(ioaddr, 15, (x & ~0x0044));	/* reset bits 6 and 2 */
...
drivers/net/tulip/media.c
View file @ 1b981021
...
@@ -174,6 +174,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
 			break;
 		}
 		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return;
 	}

 	/* Establish sync by sending 32 logic ones. */
...