Commit cb636fe3 authored by Mithlesh Thukral, committed by Greg Kroah-Hartman

Staging: sxg: Indentation fixes - mostly 80 char lines

Fix up the indentation to Linux kernel coding style. Some of the existing
indentation did not follow the kernel style, particularly around the 80-character line limit.
Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com>
Signed-off-by: Christopher Harrer <charrer@alacritech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent ddd6f0a8
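Most of the hunks below have the same shape: a declaration or statement that overflows 80 columns is split, with the continuation indented under the opening parenthesis. A minimal sketch using one of the prototypes touched by this patch (the whitespace shown is only illustrative of the convention, not the exact tabs in the diff):

	/* before: the prototype runs past 80 columns */
	static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl);

	/* after: wrapped so the continuation aligns under the opening parenthesis */
	static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
				 struct sxg_scatter_gather *SxgSgl);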
@@ -82,28 +82,30 @@
static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size, static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
enum sxg_buffer_type BufferType); enum sxg_buffer_type BufferType);
static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, void *RcvBlock, static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
dma_addr_t PhysicalAddress, void *RcvBlock,
u32 Length); dma_addr_t PhysicalAddress,
u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
struct sxg_scatter_gather *SxgSgl, struct sxg_scatter_gather *SxgSgl,
dma_addr_t PhysicalAddress, dma_addr_t PhysicalAddress,
u32 Length); u32 Length);
static void sxg_mcast_init_crc32(void); static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev); static int sxg_entry_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev); static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev); static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb); static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl); static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
struct sxg_scatter_gather *SxgSgl);
static void sxg_handle_interrupt(struct adapter_t *adapter); static void sxg_handle_interrupt(struct adapter_t *adapter);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId); static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId); static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId);
static void sxg_complete_slow_send(struct adapter_t *adapter); static void sxg_complete_slow_send(struct adapter_t *adapter);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_event *Event); static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus); static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter, static bool sxg_mac_filter(struct adapter_t *adapter,
struct ether_header *EtherHdr, ushort length); struct ether_header *EtherHdr, ushort length);
@@ -129,7 +131,8 @@ static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter); static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter); static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter); static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkState); static void sxg_link_state(struct adapter_t *adapter,
enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter, static int sxg_write_mdio_reg(struct adapter_t *adapter,
u32 DevAddr, u32 RegAddr, u32 Value); u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter, static int sxg_read_mdio_reg(struct adapter_t *adapter,
@@ -137,7 +140,8 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter,
static unsigned int sxg_first_init = 1; static unsigned int sxg_first_init = 1;
static char *sxg_banner = static char *sxg_banner =
"Alacritech SLIC Technology(tm) Server and Storage 10Gbe Accelerator (Non-Accelerated)\n"; "Alacritech SLIC Technology(tm) Server and Storage \
10Gbe Accelerator (Non-Accelerated)\n";
static int sxg_debug = 1; static int sxg_debug = 1;
static int debug = -1; static int debug = -1;
@@ -152,8 +156,10 @@ static u32 dynamic_intagg = 0;
#define DRV_NAME "sxg" #define DRV_NAME "sxg"
#define DRV_VERSION "1.0.1" #define DRV_VERSION "1.0.1"
#define DRV_AUTHOR "Alacritech, Inc. Engineering" #define DRV_AUTHOR "Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver" #define DRV_DESCRIPTION \
#define DRV_COPYRIGHT "Copyright 2000-2008 Alacritech, Inc. All rights reserved." "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT \
"Copyright 2000-2008 Alacritech, Inc. All rights reserved."
MODULE_AUTHOR(DRV_AUTHOR); MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -236,14 +242,15 @@ static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
* Return * Return
* int * int
*/ */
static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL UcodeSel) static bool sxg_download_microcode(struct adapter_t *adapter,
enum SXG_UCODE_SEL UcodeSel)
{ {
struct sxg_hw_regs *HwRegs = adapter->HwRegs; struct sxg_hw_regs *HwRegs = adapter->HwRegs;
u32 Section; u32 Section;
u32 ThisSectionSize; u32 ThisSectionSize;
u32 *Instruction = NULL; u32 *Instruction = NULL;
u32 BaseAddress, AddressOffset, Address; u32 BaseAddress, AddressOffset, Address;
/* u32 Failure; */ /* u32 Failure; */
u32 ValueRead; u32 ValueRead;
u32 i; u32 i;
u32 numSections = 0; u32 numSections = 0;
@@ -289,7 +296,8 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
break; break;
} }
BaseAddress = sectionStart[Section]; BaseAddress = sectionStart[Section];
ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */ /* Size in instructions */
ThisSectionSize = sectionSize[Section] / 12;
for (AddressOffset = 0; AddressOffset < ThisSectionSize; for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) { AddressOffset++) {
Address = BaseAddress + AddressOffset; Address = BaseAddress + AddressOffset;
@@ -333,7 +341,8 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
break; break;
} }
BaseAddress = sectionStart[Section]; BaseAddress = sectionStart[Section];
ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */ /* Size in instructions */
ThisSectionSize = sectionSize[Section] / 12;
for (AddressOffset = 0; AddressOffset < ThisSectionSize; for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) { AddressOffset++) {
Address = BaseAddress + AddressOffset; Address = BaseAddress + AddressOffset;
@@ -346,7 +355,7 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
DBG_ERROR("sxg: %s PARITY ERROR\n", DBG_ERROR("sxg: %s PARITY ERROR\n",
__func__); __func__);
return (FALSE); /* Parity error */ return FALSE; /* Parity error */
} }
ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address); ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
/* Read the instruction back and compare */ /* Read the instruction back and compare */
@@ -354,19 +363,19 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
if (ValueRead != *Instruction) { if (ValueRead != *Instruction) {
DBG_ERROR("sxg: %s MISCOMPARE LOW\n", DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
__func__); __func__);
return (FALSE); /* Miscompare */ return FALSE; /* Miscompare */
} }
READ_REG(HwRegs->UcodeDataMiddle, ValueRead); READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
if (ValueRead != *(Instruction + 1)) { if (ValueRead != *(Instruction + 1)) {
DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n", DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
__func__); __func__);
return (FALSE); /* Miscompare */ return FALSE; /* Miscompare */
} }
READ_REG(HwRegs->UcodeDataHigh, ValueRead); READ_REG(HwRegs->UcodeDataHigh, ValueRead);
if (ValueRead != *(Instruction + 2)) { if (ValueRead != *(Instruction + 2)) {
DBG_ERROR("sxg: %s MISCOMPARE HIGH\n", DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
__func__); __func__);
return (FALSE); /* Miscompare */ return FALSE; /* Miscompare */
} }
/* Advance 3 u32S to start of next instruction */ /* Advance 3 u32S to start of next instruction */
Instruction += 3; Instruction += 3;
@@ -391,7 +400,7 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
if (i == 10000) { if (i == 10000) {
DBG_ERROR("sxg: %s TIMEOUT\n", __func__); DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
return (FALSE); /* Timeout */ return FALSE; /* Timeout */
} }
/* /*
* Now write the LoadSync register. This is used to * Now write the LoadSync register. This is used to
@@ -413,18 +422,17 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
* sxg_allocate_resources - Allocate memory and locks * sxg_allocate_resources - Allocate memory and locks
* *
* Arguments - * Arguments -
* adapter - A pointer to our adapter structure * adapter - A pointer to our adapter structure
* *
* Return * Return - int
* int
*/ */
static int sxg_allocate_resources(struct adapter_t *adapter) static int sxg_allocate_resources(struct adapter_t *adapter)
{ {
int status; int status;
u32 i; u32 i;
u32 RssIds, IsrCount; u32 RssIds, IsrCount;
/* struct sxg_xmt_ring *XmtRing; */ /* struct sxg_xmt_ring *XmtRing; */
/* struct sxg_rcv_ring *RcvRing; */ /* struct sxg_rcv_ring *RcvRing; */
DBG_ERROR("%s ENTER\n", __func__); DBG_ERROR("%s ENTER\n", __func__);
@@ -470,14 +478,15 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
(unsigned int)(sizeof(struct sxg_xmt_ring) * 1)); (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
/* /*
* Start with big items first - receive and transmit rings. At the moment * Start with big items first - receive and transmit rings.
* I'm going to keep the ring size fixed and adjust the * At the moment I'm going to keep the ring size fixed and
* TCBs if we fail. Later we might consider reducing the ring size as well.. * adjust the TCBs if we fail. Later we might
* consider reducing the ring size as well..
*/ */
adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
sizeof(struct sxg_xmt_ring) * sizeof(struct sxg_xmt_ring) *
1, 1,
&adapter->PXmtRings); &adapter->PXmtRings);
DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings); DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
if (!adapter->XmtRings) { if (!adapter->XmtRings) {
@@ -533,15 +542,15 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
*/ */
for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS; for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
i += SXG_RCV_DESCRIPTORS_PER_BLOCK) { i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
sxg_allocate_buffer_memory(adapter, sxg_allocate_buffer_memory(adapter,
SXG_RCV_BLOCK_SIZE(adapter-> SXG_RCV_BLOCK_SIZE(adapter->ReceiveBufferSize),
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV); SXG_BUFFER_TYPE_RCV);
} }
/* /*
* NBL resource allocation can fail in the 'AllocateComplete' routine, which * NBL resource allocation can fail in the 'AllocateComplete' routine,
* doesn't return status. Make sure we got the number of buffers we requested * which doesn't return status. Make sure we got the number of buffers
* we requested
*/ */
if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) { if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
@@ -555,12 +564,13 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
/* Allocate event queues. */ /* Allocate event queues. */
adapter->EventRings = pci_alloc_consistent(adapter->pcidev, adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
sizeof(struct sxg_event_ring) * sizeof(struct sxg_event_ring) *
RssIds, RssIds,
&adapter->PEventRings); &adapter->PEventRings);
if (!adapter->EventRings) { if (!adapter->EventRings) {
/* Caller will call SxgFreeAdapter to clean up above allocations */ /* Caller will call SxgFreeAdapter to clean up above
* allocations */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
adapter, SXG_MAX_ENTRIES, 0, 0); adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES; status = STATUS_RESOURCES;
@@ -573,7 +583,8 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
adapter->Isr = pci_alloc_consistent(adapter->pcidev, adapter->Isr = pci_alloc_consistent(adapter->pcidev,
IsrCount, &adapter->PIsr); IsrCount, &adapter->PIsr);
if (!adapter->Isr) { if (!adapter->Isr) {
/* Caller will call SxgFreeAdapter to clean up above allocations */ /* Caller will call SxgFreeAdapter to clean up above
* allocations */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
adapter, SXG_MAX_ENTRIES, 0, 0); adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES; status = STATUS_RESOURCES;
@@ -620,12 +631,19 @@ static void sxg_config_pci(struct pci_dev *pcidev)
pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command); DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
/* Set the command register */ /* Set the command register */
new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */ new_command = pci_command | (
PCI_COMMAND_MASTER | /* Bus master enable */ /* Memory Space Enable */
PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */ PCI_COMMAND_MEMORY |
PCI_COMMAND_PARITY | /* Parity error response */ /* Bus master enable */
PCI_COMMAND_SERR | /* System ERR */ PCI_COMMAND_MASTER |
PCI_COMMAND_FAST_BACK); /* Fast back-to-back */ /* Memory write and invalidate */
PCI_COMMAND_INVALIDATE |
/* Parity error response */
PCI_COMMAND_PARITY |
/* System ERR */
PCI_COMMAND_SERR |
/* Fast back-to-back */
PCI_COMMAND_FAST_BACK);
if (pci_command != new_command) { if (pci_command != new_command) {
DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n", DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
__func__, pci_command, new_command); __func__, pci_command, new_command);
@@ -633,7 +651,8 @@ static void sxg_config_pci(struct pci_dev *pcidev)
} }
} }
static unsigned char temp_mac_address[6] = { 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69 }; static unsigned char temp_mac_address[6] =
{ 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69 };
/* /*
* sxg_read_config * sxg_read_config
* @adapter : Pointer to the adapter structure for the card * @adapter : Pointer to the adapter structure for the card
@@ -647,13 +666,15 @@ static inline int sxg_read_config(struct adapter_t *adapter)
unsigned long status; unsigned long status;
unsigned long i; unsigned long i;
data = pci_alloc_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), &p_addr); data = pci_alloc_consistent(adapter->pcidev,
sizeof(struct sw_cfg_data), &p_addr);
if(!data) { if(!data) {
/* /*
* We cant get even this much memory. Raise a hell * We cant get even this much memory. Raise a hell
* Get out of here * Get out of here
*/ */
printk(KERN_ERR"%s : Could not allocate memory for reading EEPROM\n", __FUNCTION__); printk(KERN_ERR"%s : Could not allocate memory for reading \
EEPROM\n", __FUNCTION__);
return -ENOMEM; return -ENOMEM;
} }
@@ -668,22 +689,26 @@ static inline int sxg_read_config(struct adapter_t *adapter)
} }
switch(status) { switch(status) {
case SXG_CFG_LOAD_EEPROM: /*Config read from EEPROM succeeded */ /* Config read from EEPROM succeeded */
case SXG_CFG_LOAD_FLASH: /* onfig read from Flash succeeded */ case SXG_CFG_LOAD_EEPROM:
/* Copy the MAC address to adapter structure */ /* Config read from Flash succeeded */
memcpy(temp_mac_address, data->MacAddr[0].MacAddr, 6); case SXG_CFG_LOAD_FLASH:
/* TODO: We are not doing the remaining part : FRU, etc */ /* Copy the MAC address to adapter structure */
break; memcpy(temp_mac_address, data->MacAddr[0].MacAddr, 6);
/* TODO: We are not doing the remaining part : FRU,
case SXG_CFG_TIMEOUT: * etc
case SXG_CFG_LOAD_INVALID: */
case SXG_CFG_LOAD_ERROR: break;
default: /* Fix default handler later */ case SXG_CFG_TIMEOUT:
printk(KERN_WARNING"%s : We could not read the config word." case SXG_CFG_LOAD_INVALID:
"Status = %ld\n", __FUNCTION__, status); case SXG_CFG_LOAD_ERROR:
break; default: /* Fix default handler later */
printk(KERN_WARNING"%s : We could not read the config \
word. Status = %ld\n", __FUNCTION__, status);
break;
} }
pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data, p_addr); pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
p_addr);
if (adapter->netdev) { if (adapter->netdev) {
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
@@ -782,9 +807,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
goto err_out_free_mmio_region; goto err_out_free_mmio_region;
} }
DBG_ERROR DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] len[%lx], IRQ %d.\n", len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
__func__, memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq); mmio_len, pcidev->irq);
adapter->HwRegs = (void *)memmapped_ioaddr; adapter->HwRegs = (void *)memmapped_ioaddr;
adapter->base_addr = memmapped_ioaddr; adapter->base_addr = memmapped_ioaddr;
@@ -832,12 +857,12 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
} }
/* /*
* status = SXG_READ_EEPROM(adapter); * status = SXG_READ_EEPROM(adapter);
* if (!status) { * if (!status) {
* goto sxg_init_bad; * goto sxg_init_bad;
* } * }
*/ */
DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__); DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
sxg_config_pci(pcidev); sxg_config_pci(pcidev);
@@ -894,7 +919,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
netdev->set_multicast_list = sxg_mcast_set_list; netdev->set_multicast_list = sxg_mcast_set_list;
strcpy(netdev->name, "eth%d"); strcpy(netdev->name, "eth%d");
/* strcpy(netdev->name, pci_name(pcidev)); */ /* strcpy(netdev->name, pci_name(pcidev)); */
if ((err = register_netdev(netdev))) { if ((err = register_netdev(netdev))) {
DBG_ERROR("Cannot register net device, aborting. %s\n", DBG_ERROR("Cannot register net device, aborting. %s\n",
netdev->name); netdev->name);
@@ -902,14 +927,15 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
} }
DBG_ERROR DBG_ERROR
("sxg: %s addr 0x%lx, irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", ("sxg: %s addr 0x%lx, irq %d, MAC addr \
%02X:%02X:%02X:%02X:%02X:%02X\n",
netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0], netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
netdev->dev_addr[4], netdev->dev_addr[5]); netdev->dev_addr[4], netdev->dev_addr[5]);
/*sxg_init_bad: */ /* sxg_init_bad: */
ASSERT(status == FALSE); ASSERT(status == FALSE);
/* sxg_free_adapter(adapter); */ /* sxg_free_adapter(adapter); */
DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__, DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
status, jiffies, smp_processor_id()); status, jiffies, smp_processor_id());
@@ -991,18 +1017,17 @@ static void sxg_enable_interrupt(struct adapter_t *adapter)
* sxg_isr - Process an line-based interrupt * sxg_isr - Process an line-based interrupt
* *
* Arguments: * Arguments:
* Context - Our adapter structure * Context - Our adapter structure
* QueueDefault - Output parameter to queue to default CPU * QueueDefault - Output parameter to queue to default CPU
* TargetCpus - Output bitmap to schedule DPC's * TargetCpus - Output bitmap to schedule DPC's
* *
* Return Value: * Return Value: TRUE if our interrupt
* TRUE if our interrupt
*/ */
static irqreturn_t sxg_isr(int irq, void *dev_id) static irqreturn_t sxg_isr(int irq, void *dev_id)
{ {
struct net_device *dev = (struct net_device *) dev_id; struct net_device *dev = (struct net_device *) dev_id;
struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
/* u32 CpuMask = 0, i; */ /* u32 CpuMask = 0, i; */
adapter->Stats.NumInts++; adapter->Stats.NumInts++;
if (adapter->Isr[0] == 0) { if (adapter->Isr[0] == 0) {
@@ -1023,7 +1048,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
adapter->IsrCopy[0] = adapter->Isr[0]; adapter->IsrCopy[0] = adapter->Isr[0];
adapter->Isr[0] = 0; adapter->Isr[0] = 0;
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE); WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
/* ASSERT(adapter->IsrDpcsPending == 0); */ /* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO /* RSS Stuff */ #if XXXTODO /* RSS Stuff */
/* /*
* If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
@@ -1033,7 +1058,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
for (i = 0; for (i = 0;
i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
i++) { i++) {
struct sxg_event_ring *EventRing = &adapter->EventRings[i]; struct sxg_event_ring *EventRing =
&adapter->EventRings[i];
struct sxg_event *Event = struct sxg_event *Event =
&EventRing->Ring[adapter->NextEvent[i]]; &EventRing->Ring[adapter->NextEvent[i]];
unsigned char Cpu = unsigned char Cpu =
@@ -1044,7 +1070,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
} }
} }
} }
/* Now, either schedule the CPUs specified by the CpuMask, /*
* Now, either schedule the CPUs specified by the CpuMask,
* or queue default * or queue default
*/ */
if (CpuMask) { if (CpuMask) {
@@ -1065,7 +1092,7 @@ int debug_inthandler = 0;
static void sxg_handle_interrupt(struct adapter_t *adapter) static void sxg_handle_interrupt(struct adapter_t *adapter)
{ {
/* unsigned char RssId = 0; */ /* unsigned char RssId = 0; */
u32 NewIsr; u32 NewIsr;
if (++debug_inthandler < 20) { if (++debug_inthandler < 20) {
@@ -1154,10 +1181,12 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
} }
/* Card crash */ /* Card crash */
if (Isr & SXG_ISR_DEAD) { if (Isr & SXG_ISR_DEAD) {
/* Set aside the crash info and set the adapter state to RESET */ /*
adapter->CrashCpu = * Set aside the crash info and set the adapter state
(unsigned char)((Isr & SXG_ISR_CPU) >> * to RESET
SXG_ISR_CPU_SHIFT); */
adapter->CrashCpu = (unsigned char)
((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH); adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
adapter->Dead = TRUE; adapter->Dead = TRUE;
DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__, DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
@@ -1188,7 +1217,8 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
} }
/* Dump */ /* Dump */
if (Isr & SXG_ISR_UPC) { if (Isr & SXG_ISR_UPC) {
ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */ /* Maybe change when debug is added.. */
ASSERT(adapter->DumpCmdRunning);
adapter->DumpCmdRunning = FALSE; adapter->DumpCmdRunning = FALSE;
} }
/* Link event */ /* Link event */
@@ -1199,8 +1229,8 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
if (Isr & SXG_ISR_BREAK) { if (Isr & SXG_ISR_BREAK) {
/* /*
* At the moment AGDB isn't written to support interactive * At the moment AGDB isn't written to support interactive
* debug sessions. When it is, this interrupt will be used * debug sessions. When it is, this interrupt will be used to
* to signal AGDB that it has hit a breakpoint. For now, ASSERT. * signal AGDB that it has hit a breakpoint. For now, ASSERT.
*/ */
ASSERT(0); ASSERT(0);
} }
@@ -1261,7 +1291,8 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
adapter->NextEvent); adapter->NextEvent);
switch (Event->Code) { switch (Event->Code) {
case EVENT_CODE_BUFFERS: case EVENT_CODE_BUFFERS:
ASSERT(!(Event->CommandIndex & 0xFF00)); /* struct sxg_ring_info Head & Tail == unsigned char */ /* struct sxg_ring_info Head & Tail == unsigned char */
ASSERT(!(Event->CommandIndex & 0xFF00));
sxg_complete_descriptor_blocks(adapter, sxg_complete_descriptor_blocks(adapter,
Event->CommandIndex); Event->CommandIndex);
break; break;
@@ -1279,8 +1310,9 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
* capability of an indication list. * capability of an indication list.
*/ */
#else #else
/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */ /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */ /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
rx_bytes = Event->Length;
adapter->stats.rx_packets++; adapter->stats.rx_packets++;
adapter->stats.rx_bytes += rx_bytes; adapter->stats.rx_bytes += rx_bytes;
#if SXG_OFFLOAD_IP_CHECKSUM #if SXG_OFFLOAD_IP_CHECKSUM
@@ -1294,7 +1326,7 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
default: default:
DBG_ERROR("%s: ERROR Invalid EventCode %d\n", DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
__func__, Event->Code); __func__, Event->Code);
/* ASSERT(0); */ /* ASSERT(0); */
} }
/* /*
* See if we need to restock card receive buffers. * See if we need to restock card receive buffers.
@@ -1404,7 +1436,8 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
case SXG_SGL_DUMB: case SXG_SGL_DUMB:
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct sxg_scatter_gather *SxgSgl = (struct sxg_scatter_gather *)ContextType; struct sxg_scatter_gather *SxgSgl =
(struct sxg_scatter_gather *)ContextType;
/* Dumb-nic send. Command context is the dumb-nic SGL */ /* Dumb-nic send. Command context is the dumb-nic SGL */
skb = (struct sk_buff *)ContextType; skb = (struct sk_buff *)ContextType;
@@ -1415,13 +1448,14 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
0, 0); 0, 0);
printk("ASK:sxg_complete_slow_send: freeing an skb [%p]\n", skb); printk("ASK:sxg_complete_slow_send: freeing an skb [%p]\n", skb);
ASSERT(adapter->Stats.XmtQLen); ASSERT(adapter->Stats.XmtQLen);
adapter->Stats.XmtQLen--; /* within XmtZeroLock */ adapter->Stats.XmtQLen--;/* within XmtZeroLock */
adapter->Stats.XmtOk++; adapter->Stats.XmtOk++;
/* /*
* Now drop the lock and complete the send back to * Now drop the lock and complete the send
* Microsoft. We need to drop the lock because * back to Microsoft. We need to drop the lock
* Microsoft can come back with a chimney send, which * because Microsoft can come back with a
* results in a double trip in SxgTcpOuput * chimney send, which results in a double trip
* in SxgTcpOuput
*/ */
spin_unlock(&adapter->XmtZeroLock); spin_unlock(&adapter->XmtZeroLock);
SXG_COMPLETE_DUMB_SEND(adapter, skb); SXG_COMPLETE_DUMB_SEND(adapter, skb);
@@ -1445,10 +1479,10 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
* adapter - A pointer to our adapter structure * adapter - A pointer to our adapter structure
* Event - Receive event * Event - Receive event
* *
* Return * Return - skb
* skb
*/ */
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_event *Event) static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
struct sxg_event *Event)
{ {
struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
struct sk_buff *Packet; struct sk_buff *Packet;
@@ -1476,12 +1510,15 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
goto drop; goto drop;
} }
printk("ASK:sxg_slow_receive: event host handle %p\n", RcvDataBufferHdr); printk("ASK:sxg_slow_receive:event host handle %p\n", RcvDataBufferHdr);
data = SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr); data = SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr);
for (i = 0; i < 32; i++) for (i = 0; i < 32; i++)
dptr += sprintf(dptr, "%02x ", (unsigned)data[i]); dptr += sprintf(dptr, "%02x ", (unsigned)data[i]);
printk("ASK:sxg_slow_receive: data %s\n", dstr); printk("ASK:sxg_slow_receive: data %s\n", dstr);
/* memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), RcvDataBufferHdr->VirtualAddress, Event->Length);*/ /*
* memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
* RcvDataBufferHdr->VirtualAddress, Event->Length);
*/
/* Change buffer state to UPSTREAM */ /* Change buffer state to UPSTREAM */
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
@@ -1498,8 +1535,9 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
} }
#if XXXTODO /* VLAN stuff */ #if XXXTODO /* VLAN stuff */
/* If there's a VLAN tag, extract it and validate it */ /* If there's a VLAN tag, extract it and validate it */
if (((struct ether_header*) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))-> if (((struct ether_header *)
EtherType == ETHERTYPE_VLAN) { (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
== ETHERTYPE_VLAN) {
if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) != if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
STATUS_SUCCESS) { STATUS_SUCCESS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
@@ -1526,7 +1564,8 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
Packet = RcvDataBufferHdr->SxgDumbRcvPacket; Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event); SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
Packet->protocol = eth_type_trans(Packet, adapter->netdev); Packet->protocol = eth_type_trans(Packet, adapter->netdev);
printk("ASK:sxg_slow_receive: protocol %x\n", (unsigned) Packet->protocol); printk("ASK:sxg_slow_receive: protocol %x\n",
(unsigned) Packet->protocol);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
RcvDataBufferHdr, Packet, Event->Length, 0); RcvDataBufferHdr, Packet, Event->Length, 0);
@@ -1554,8 +1593,7 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
* adapter - Adapter structure * adapter - Adapter structure
* ErrorStatus - 4-byte receive error status * ErrorStatus - 4-byte receive error status
* *
* Return Value: * Return Value : None
* None
*/ */
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus) static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
{ {
@@ -1633,11 +1671,10 @@ static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
* pether - Ethernet header * pether - Ethernet header
* length - Frame length * length - Frame length
* *
* Return Value: * Return Value : TRUE if the frame is to be allowed
* TRUE if the frame is to be allowed
*/ */
static bool sxg_mac_filter(struct adapter_t *adapter, struct ether_header *EtherHdr, static bool sxg_mac_filter(struct adapter_t *adapter,
ushort length) struct ether_header *EtherHdr, ushort length)
{ {
bool EqualAddr; bool EqualAddr;
@@ -1927,17 +1964,19 @@ static int sxg_entry_halt(struct net_device *dev)
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{ {
ASSERT(rq); ASSERT(rq);
/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */ /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
switch (cmd) { switch (cmd) {
case SIOCSLICSETINTAGG: case SIOCSLICSETINTAGG:
{ {
/* struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); */ /* struct adapter_t *adapter = (struct adapter_t *)
* netdev_priv(dev);
*/
u32 data[7]; u32 data[7];
u32 intagg; u32 intagg;
if (copy_from_user(data, rq->ifr_data, 28)) { if (copy_from_user(data, rq->ifr_data, 28)) {
DBG_ERROR DBG_ERROR("copy_from_user FAILED getting \
("copy_from_user FAILED getting initial params\n"); initial params\n");
return -EFAULT; return -EFAULT;
} }
intagg = data[0]; intagg = data[0];
@@ -1948,7 +1987,7 @@ static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
} }
default: default:
/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */ /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
return 0; return 0;
@@ -1960,8 +1999,8 @@ static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* sxg_send_packets - Send a skb packet * sxg_send_packets - Send a skb packet
* *
* Arguments: * Arguments:
* skb - The packet to send * skb - The packet to send
* dev - Our linux net device that refs our adapter * dev - Our linux net device that refs our adapter
* *
* Return: * Return:
* 0 regardless of outcome XXXTODO refer to e1000 driver * 0 regardless of outcome XXXTODO refer to e1000 driver
@@ -2013,7 +2052,7 @@ static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
/* reject & complete all the packets if they cant be sent */ /* reject & complete all the packets if they cant be sent */
if (status != STATUS_SUCCESS) { if (status != STATUS_SUCCESS) {
#if XXXTODO #if XXXTODO
/* sxg_send_packets_fail(adapter, skb, status); */ /* sxg_send_packets_fail(adapter, skb, status); */
#else #else
SXG_DROP_DUMB_SEND(adapter, skb); SXG_DROP_DUMB_SEND(adapter, skb);
adapter->stats.tx_dropped++; adapter->stats.tx_dropped++;
@@ -2035,8 +2074,7 @@ static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
* adapter - Pointer to our adapter structure * adapter - Pointer to our adapter structure
* skb - The packet to be sent * skb - The packet to be sent
* *
* Return - * Return - STATUS of send
* STATUS of send
*/ */
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb) static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
{ {
@@ -2072,7 +2110,7 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
/* Call the common sxg_dumb_sgl routine to complete the send. */ /* Call the common sxg_dumb_sgl routine to complete the send. */
sxg_dumb_sgl(pSgl, SxgSgl); sxg_dumb_sgl(pSgl, SxgSgl);
/* Return success sxg_dumb_sgl (or something later) will complete it. */ /* Return success sxg_dumb_sgl (or something later) will complete it.*/
return (STATUS_SUCCESS); return (STATUS_SUCCESS);
} }
@@ -2086,7 +2124,8 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
* Return Value: * Return Value:
* None. * None.
*/ */
static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *SxgSgl) static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
struct sxg_scatter_gather *SxgSgl)
{ {
struct adapter_t *adapter = SxgSgl->adapter; struct adapter_t *adapter = SxgSgl->adapter;
struct sk_buff *skb = SxgSgl->DumbPacket; struct sk_buff *skb = SxgSgl->DumbPacket;
@@ -2094,10 +2133,10 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
struct sxg_cmd *XmtCmd = NULL; struct sxg_cmd *XmtCmd = NULL;
/* u32 Index = 0; */ /* u32 Index = 0; */
u32 DataLength = skb->len; u32 DataLength = skb->len;
/* unsigned int BufLen; */ /* unsigned int BufLen; */
/* u32 SglOffset; */ /* u32 SglOffset; */
u64 phys_addr; u64 phys_addr;
unsigned char*data; unsigned char*data;
int i; int i;
@@ -2167,8 +2206,7 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
* Fill in the command * Fill in the command
* Copy out the first SGE to the command and adjust for offset * Copy out the first SGE to the command and adjust for offset
*/ */
phys_addr = phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
memset(XmtCmd, '\0', sizeof(*XmtCmd)); memset(XmtCmd, '\0', sizeof(*XmtCmd));
XmtCmd->Buffer.FirstSgeAddress = phys_addr; XmtCmd->Buffer.FirstSgeAddress = phys_addr;
@@ -2210,8 +2248,8 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
adapter->Stats.XmtErrors++; adapter->Stats.XmtErrors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
/* SxgSgl->DumbPacket is the skb */
SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */ SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
} }
/* /*
@@ -2277,15 +2315,24 @@ static int sxg_initialize_link(struct adapter_t *adapter)
WRITE_REG(HwRegs->MacConfig0, 0, TRUE); WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
/* Configure MAC */ /* Configure MAC */
WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */ WRITE_REG(HwRegs->MacConfig1, (
AXGMAC_CFG1_XMT_EN | /* Enable XMT */ /* Allow sending of pause */
AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */ AXGMAC_CFG1_XMT_PAUSE |
AXGMAC_CFG1_RCV_EN | /* Enable receive */ /* Enable XMT */
AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */ AXGMAC_CFG1_XMT_EN |
AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */ /* Enable detection of pause */
AXGMAC_CFG1_GEN_FCS | /* Generate FCS */ AXGMAC_CFG1_RCV_PAUSE |
AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */ /* Enable receive */
TRUE); AXGMAC_CFG1_RCV_EN |
/* short frame detection */
AXGMAC_CFG1_SHORT_ASSERT |
/* Verify frame length */
AXGMAC_CFG1_CHECK_LEN |
/* Generate FCS */
AXGMAC_CFG1_GEN_FCS |
/* Pad frames to 64 bytes */
AXGMAC_CFG1_PAD_64),
TRUE);
/* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */ /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
if (adapter->JumboEnabled) { if (adapter->JumboEnabled) {
@@ -2314,15 +2361,20 @@ static int sxg_initialize_link(struct adapter_t *adapter)
/* /*
* Per information given by Aeluros, wait 100 ms after removing reset. * Per information given by Aeluros, wait 100 ms after removing reset.
* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. * It's not enough to wait for the self-clearing reset bit in reg 0 to
* clear.
*/ */
mdelay(100); mdelay(100);
/* Verify the PHY has come up by checking that the Reset bit has cleared. */ /* Verify the PHY has come up by checking that the Reset bit has
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ * cleared.
PHY_PMA_CONTROL1, /* PMA/PMD control register */ */
&Value); status = sxg_read_mdio_reg(adapter,
DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value, (Value & PMA_CONTROL1_RESET)); MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
PHY_PMA_CONTROL1, /* PMA/PMD control register */
&Value);
DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
(Value & PMA_CONTROL1_RESET));
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
return (STATUS_FAILURE); return (STATUS_FAILURE);
if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */ if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
@@ -2343,16 +2395,26 @@ static int sxg_initialize_link(struct adapter_t *adapter)
return (STATUS_FAILURE); return (STATUS_FAILURE);
/* Enable the Link Alarm */ /* Enable the Link Alarm */
status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
LASI_CONTROL, /* LASI control register */ /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */ * LASI_CONTROL - LASI control register
* LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
*/
status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
LASI_CONTROL,
LASI_CTL_LS_ALARM_ENABLE);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
return (STATUS_FAILURE); return (STATUS_FAILURE);
/* XXXTODO - temporary - verify bit is set */ /* XXXTODO - temporary - verify bit is set */
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
LASI_CONTROL, /* LASI control register */ /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
* LASI_CONTROL - LASI control register
*/
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
LASI_CONTROL,
&Value); &Value);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
return (STATUS_FAILURE); return (STATUS_FAILURE);
if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) { if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
@@ -2397,16 +2459,20 @@ static int sxg_phy_init(struct adapter_t *adapter)
DBG_ERROR("ENTER %s\n", __func__); DBG_ERROR("ENTER %s\n", __func__);
/* Read a register to identify the PHY type */ /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ * 0xC205 - PHY ID register (?)
0xC205, /* PHY ID register (?) */ * &Value - XXXTODO - add def
&Value); /* XXXTODO - add def */ */
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
0xC205,
&Value);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
return (STATUS_FAILURE); return (STATUS_FAILURE);
if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */ if (Value == 0x0012) {
DBG_ERROR /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
("AEL2005C PHY detected. Downloading PHY microcode.\n"); DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
microcode.\n");
/* Initialize AEL2005C PHY and download PHY microcode */ /* Initialize AEL2005C PHY and download PHY microcode */
for (p = PhyUcode; p->Addr != 0xFFFF; p++) { for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
@@ -2414,10 +2480,13 @@ static int sxg_phy_init(struct adapter_t *adapter)
/* if address == 0, data == sleep time in ms */ /* if address == 0, data == sleep time in ms */
mdelay(p->Data); mdelay(p->Data);
} else { } else {
/* write the given data to the specified address */ /* write the given data to the specified address */
status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ status = sxg_write_mdio_reg(adapter,
p->Addr, /* PHY address */ MIIM_DEV_PHY_PMA,
p->Data); /* PHY data */ /* PHY address */
p->Addr,
/* PHY data */
p->Data);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
return (STATUS_FAILURE); return (STATUS_FAILURE);
} }
@@ -2458,13 +2527,15 @@ static void sxg_link_event(struct adapter_t *adapter)
mdelay(10); mdelay(10);
/* Now clear the alarm by reading the LASI status register. */ /* Now clear the alarm by reading the LASI status register. */
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
LASI_STATUS, /* LASI status register */ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
/* LASI status register */
LASI_STATUS,
&Value); &Value);
if (status != STATUS_SUCCESS) { if (status != STATUS_SUCCESS) {
DBG_ERROR("Error reading LASI Status MDIO register!\n"); DBG_ERROR("Error reading LASI Status MDIO register!\n");
sxg_link_state(adapter, SXG_LINK_DOWN); sxg_link_state(adapter, SXG_LINK_DOWN);
/* ASSERT(0); */ /* ASSERT(0); */
} }
ASSERT(Value & LASI_STATUS_LS_ALARM); ASSERT(Value & LASI_STATUS_LS_ALARM);
@@ -2483,7 +2554,7 @@ static void sxg_link_event(struct adapter_t *adapter)
*/ */
DBG_ERROR("SXG: sxg_link_event: Can't get here!\n"); DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value); DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
/* ASSERT(0); */ /* ASSERT(0); */
} }
DBG_ERROR("EXIT %s\n", __func__); DBG_ERROR("EXIT %s\n", __func__);
@@ -2512,8 +2583,11 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
* the following 3 bits (from 3 different MDIO registers) are all true. * the following 3 bits (from 3 different MDIO registers) are all true.
*/ */
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */ /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
/* PMA/PMD Receive Signal Detect register */
PHY_PMA_RCV_DET,
&Value); &Value);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
goto bad; goto bad;
@@ -2522,8 +2596,10 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
if (!(Value & PMA_RCV_DETECT)) if (!(Value & PMA_RCV_DETECT))
return (SXG_LINK_DOWN); return (SXG_LINK_DOWN);
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */ /* MIIM_DEV_PHY_PCS - PHY PCS module */
PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
/* PCS 10GBASE-R Status 1 register */
PHY_PCS_10G_STATUS1,
&Value); &Value);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
goto bad; goto bad;
@@ -2532,8 +2608,9 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
if (!(Value & PCS_10B_BLOCK_LOCK)) if (!(Value & PCS_10B_BLOCK_LOCK))
return (SXG_LINK_DOWN); return (SXG_LINK_DOWN);
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
PHY_XS_LANE_STATUS, /* XS Lane Status register */ /* XS Lane Status register */
PHY_XS_LANE_STATUS,
&Value); &Value);
if (status != STATUS_SUCCESS) if (status != STATUS_SUCCESS)
goto bad; goto bad;
@@ -2548,7 +2625,7 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
return (SXG_LINK_UP); return (SXG_LINK_UP);
bad: bad:
/* An error occurred reading an MDIO register. This shouldn't happen. */ /* An error occurred reading an MDIO register. This shouldn't happen. */
DBG_ERROR("Error reading an MDIO register!\n"); DBG_ERROR("Error reading an MDIO register!\n");
ASSERT(0); ASSERT(0);
return (SXG_LINK_DOWN); return (SXG_LINK_DOWN);
@@ -2581,7 +2658,8 @@ static void sxg_indicate_link_state(struct adapter_t *adapter,
* Return * Return
* None * None
*/ */
static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkState) static void sxg_link_state(struct adapter_t *adapter,
enum SXG_LINK_STATE LinkState)
{ {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
adapter, LinkState, adapter->LinkState, adapter->State); adapter, LinkState, adapter->LinkState, adapter->State);
@@ -2596,7 +2674,8 @@ static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkSt
if (LinkState == adapter->LinkState) { if (LinkState == adapter->LinkState) {
/* Nothing changed.. */ /* Nothing changed.. */
spin_unlock(&adapter->AdapterLock); spin_unlock(&adapter->AdapterLock);
DBG_ERROR("EXIT #0 %s\n", __func__); DBG_ERROR("EXIT #0 %s. Link status = %d\n",
__func__, LinkState);
return; return;
} }
/* Save the adapter state */ /* Save the adapter state */
@@ -2625,13 +2704,15 @@ static int sxg_write_mdio_reg(struct adapter_t *adapter,
u32 DevAddr, u32 RegAddr, u32 Value) u32 DevAddr, u32 RegAddr, u32 Value)
{ {
struct sxg_hw_regs *HwRegs = adapter->HwRegs; struct sxg_hw_regs *HwRegs = adapter->HwRegs;
u32 AddrOp; /* Address operation (written to MIIM field reg) */ /* Address operation (written to MIIM field reg) */
u32 WriteOp; /* Write operation (written to MIIM field reg) */ u32 AddrOp;
u32 Cmd; /* Command (written to MIIM command reg) */ /* Write operation (written to MIIM field reg) */
u32 WriteOp;
u32 Cmd;/* Command (written to MIIM command reg) */
u32 ValueRead; u32 ValueRead;
u32 Timeout; u32 Timeout;
/* DBG_ERROR("ENTER %s\n", __func__); */ /* DBG_ERROR("ENTER %s\n", __func__); */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0); adapter, 0, 0, 0);
@@ -2694,7 +2775,7 @@ static int sxg_write_mdio_reg(struct adapter_t *adapter,
} }
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
/* DBG_ERROR("EXIT %s\n", __func__); */ /* DBG_ERROR("EXIT %s\n", __func__); */
return (STATUS_SUCCESS); return (STATUS_SUCCESS);
} }
@@ -2706,7 +2787,7 @@ static int sxg_write_mdio_reg(struct adapter_t *adapter,
* adapter - A pointer to our adapter structure * adapter - A pointer to our adapter structure
* DevAddr - MDIO device number being addressed * DevAddr - MDIO device number being addressed
* RegAddr - register address for the specified MDIO device * RegAddr - register address for the specified MDIO device
* pValue - pointer to where to put data read from the MDIO register * pValue - pointer to where to put data read from the MDIO register
* *
* Return * Return
* status * status
@@ -2715,15 +2796,15 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter,
u32 DevAddr, u32 RegAddr, u32 *pValue) u32 DevAddr, u32 RegAddr, u32 *pValue)
{ {
struct sxg_hw_regs *HwRegs = adapter->HwRegs; struct sxg_hw_regs *HwRegs = adapter->HwRegs;
u32 AddrOp; /* Address operation (written to MIIM field reg) */ u32 AddrOp; /* Address operation (written to MIIM field reg) */
u32 ReadOp; /* Read operation (written to MIIM field reg) */ u32 ReadOp; /* Read operation (written to MIIM field reg) */
u32 Cmd; /* Command (written to MIIM command reg) */ u32 Cmd; /* Command (written to MIIM command reg) */
u32 ValueRead; u32 ValueRead;
u32 Timeout; u32 Timeout;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0); adapter, 0, 0, 0);
DBG_ERROR("ENTER %s\n", __FUNCTION__); DBG_ERROR("ENTER %s\n", __FUNCTION__);
/* Ensure values don't exceed field width */ /* Ensure values don't exceed field width */
DevAddr &= 0x001F; /* 5-bit field */ DevAddr &= 0x001F; /* 5-bit field */
...@@ -2790,7 +2871,7 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter, ...@@ -2790,7 +2871,7 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter,
READ_REG(HwRegs->MacAmiimField, *pValue); READ_REG(HwRegs->MacAmiimField, *pValue);
*pValue &= 0xFFFF; /* data is in the lower 16 bits */ *pValue &= 0xFFFF; /* data is in the lower 16 bits */
DBG_ERROR("EXIT %s\n", __FUNCTION__); DBG_ERROR("EXIT %s\n", __FUNCTION__);
return (STATUS_SUCCESS); return (STATUS_SUCCESS);
} }
...@@ -2799,21 +2880,21 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter, ...@@ -2799,21 +2880,21 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter,
* Functions to obtain the CRC corresponding to the destination mac address. * Functions to obtain the CRC corresponding to the destination mac address.
* This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
* the polynomial: * the polynomial:
* x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + 1. * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
* + x^4 + x^2 + x^1 + 1.
* *
* After the CRC for the 6 bytes is generated (but before the value is complemented), * After the CRC for the 6 bytes is generated (but before the value is
* we must then transpose the value and return bits 30-23. * complemented), we must then transpose the value and return bits 30-23.
*/ */
static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */ static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
static u32 sxg_crc_init; /* Is table initialized */
/* /* Construct the CRC32 table */
* Construct the CRC32 table
*/
static void sxg_mcast_init_crc32(void) static void sxg_mcast_init_crc32(void)
{ {
u32 c; /* CRC shift reg */ u32 c; /* CRC shift reg */
u32 e = 0; /* Poly X-or pattern */ u32 e = 0; /* Poly X-or pattern */
int i; /* counter */ int i; /* counter */
int k; /* byte being shifted into crc */ int k; /* byte being shifted into crc */
static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 }; static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
...@@ -2831,7 +2912,6 @@ static void sxg_mcast_init_crc32(void) ...@@ -2831,7 +2912,6 @@ static void sxg_mcast_init_crc32(void)
} }
} }
static u32 sxg_crc_init; /* Is table initialized */
/* /*
* Return the MAC hash as described above. * Return the MAC hash as described above.
*/ */
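For reference, a minimal stand-alone sketch of the hash this comment block describes: a reflected CRC-32 over the six destination MAC bytes, followed by a bit-reversal from which bits 30-23 are returned. The constant 0xEDB88320 is the bit-reversed form of the polynomial listed above; the helper names crc32_reflected() and mcast_hash() and the bit-at-a-time loop are illustrative only, since the driver itself uses the table built by sxg_mcast_init_crc32().

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: bit-at-a-time reflected CRC-32 with the polynomial
 * described above (0xEDB88320 is its bit-reversed representation).
 * The shift register is preloaded with all ones and is NOT complemented,
 * matching the comment ("before the value is complemented").
 */
static uint32_t crc32_reflected(const uint8_t *data, int len)
{
	uint32_t crc = 0xFFFFFFFF;
	int i, k;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (k = 0; k < 8; k++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320 : crc >> 1;
	}
	return crc;
}

/* Transpose (bit-reverse) the CRC and return bits 30-23 as the hash. */
static uint8_t mcast_hash(const uint8_t mac[6])
{
	uint32_t crc = crc32_reflected(mac, 6);
	uint32_t rev = 0;
	int i;

	for (i = 0; i < 32; i++)
		rev = (rev << 1) | ((crc >> i) & 1);
	return (uint8_t)((rev >> 23) & 0xFF);
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };

	printf("multicast hash = 0x%02x\n", mcast_hash(mac));
	return 0;
}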
...@@ -2870,19 +2950,23 @@ static void sxg_mcast_set_mask(struct adapter_t *adapter) ...@@ -2870,19 +2950,23 @@ static void sxg_mcast_set_mask(struct adapter_t *adapter)
if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) { if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
/* /*
* Turn on all multicast addresses. We have to do this for promiscuous * Turn on all multicast addresses. We have to do this for
* mode as well as ALLMCAST mode. It saves the Microcode from having * promiscuous mode as well as ALLMCAST mode. It saves the
* to keep state about the MAC configuration. * Microcode from having to keep state about the MAC configuration.
*/
/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n
* SLUT MODE!!!\n",__func__);
*/ */
/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */
WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH); WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH); WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */ /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
* 0xFFFFFFFF\n",__func__, adapter->netdev->name);
*/
} else { } else {
/* /*
* Commit our multicast mask to the SLIC by writing to the multicast * Commit our multicast mask to the SLIC by writing to the
* address mask registers * multicast address mask registers
*/ */
DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n", DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
__func__, adapter->netdev->name, __func__, adapter->netdev->name,
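In the else path above, the accumulated 64-bit multicast mask is committed through the same McastLow/McastHigh register pair. Below is a hedged sketch of how such a mask would be built and split for those two writes; using the low six bits of the hash as the bit index is an assumption, since the authoritative mapping lives in sxg_mcast_set_bit(), which is not part of this hunk.

#include <stdint.h>

/*
 * Hedged sketch: accumulate one bit per multicast address into a 64-bit
 * mask and split it into the two 32-bit register values written above.
 * ASSUMPTION: the bit index is the low 6 bits of the 8-bit hash; the real
 * selection is made by sxg_mcast_set_bit() and may differ.
 */
struct mcast_mask {
	uint32_t low;		/* written to sxg_regs->McastLow */
	uint32_t high;		/* written to sxg_regs->McastHigh */
};

static void mcast_mask_add(struct mcast_mask *m, uint8_t hash)
{
	unsigned int bit = hash & 0x3F;	/* assumed 0..63 bit position */

	if (bit < 32)
		m->low |= 1u << bit;
	else
		m->high |= 1u << (bit - 32);
}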
...@@ -3173,7 +3257,8 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter, ...@@ -3173,7 +3257,8 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
} }
/* /*
* sxg_allocate_rcvblock_complete - Complete a receive descriptor block allocation * sxg_allocate_rcvblock_complete - Complete a receive descriptor
* block allocation
* *
* Arguments - * Arguments -
* adapter - A pointer to our adapter structure * adapter - A pointer to our adapter structure
...@@ -3223,16 +3308,18 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, ...@@ -3223,16 +3308,18 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
RcvDataBufferHdr = RcvDataBufferHdr =
(struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer + (struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize)); (BufferSize));
RcvDataBufferHdr->VirtualAddress = RcvDataBuffer; RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */ /* For FREE macro assertion */
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
RcvDataBufferHdr->Size = RcvDataBufferHdr->Size =
SXG_RCV_BUFFER_DATA_SIZE(BufferSize); SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr); SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr);
/* ASK hardcoded 2048 */ /* ASK hardcoded 2048 */
RcvDataBufferHdr->PhysicalAddress = pci_map_single(adapter->pcidev, RcvDataBufferHdr->PhysicalAddress =
pci_map_single(adapter->pcidev,
RcvDataBufferHdr->SxgDumbRcvPacket->data, RcvDataBufferHdr->SxgDumbRcvPacket->data,
2048, 2048,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
...@@ -3255,13 +3342,14 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, ...@@ -3255,13 +3342,14 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList); InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
spin_unlock(&adapter->RcvQLock); spin_unlock(&adapter->RcvQLock);
/* Now free the contained receive data buffers that we initialized above */ /* Now free the contained receive data buffers that we
* initialized above */
RcvDataBuffer = RcvBlock; RcvDataBuffer = RcvBlock;
for (i = 0, Paddr = PhysicalAddress; for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer + RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr*)
SXG_RCV_DATA_BUFFER_HDR_OFFSET (RcvDataBuffer + SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize)); (BufferSize));
spin_lock(&adapter->RcvQLock); spin_lock(&adapter->RcvQLock);
SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
...@@ -3285,7 +3373,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, ...@@ -3285,7 +3373,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
adapter, RcvBlock, Length, 0); adapter, RcvBlock, Length, 0);
return; return;
fail: fail:
/* Free any allocated resources */ /* Free any allocated resources */
if (RcvBlock) { if (RcvBlock) {
RcvDataBuffer = RcvBlock; RcvDataBuffer = RcvBlock;
...@@ -3293,7 +3381,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter, ...@@ -3293,7 +3381,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
i++, RcvDataBuffer += BufferSize) { i++, RcvDataBuffer += BufferSize) {
RcvDataBufferHdr = RcvDataBufferHdr =
(struct sxg_rcv_data_buffer_hdr *) (RcvDataBuffer + (struct sxg_rcv_data_buffer_hdr *) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize)); (BufferSize));
SXG_FREE_RCV_PACKET(RcvDataBufferHdr); SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
} }
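Every loop in sxg_allocate_rcvblock_complete(), including the failure cleanup just above, walks the block the same way: it advances by BufferSize per descriptor and finds the buffer header at SXG_RCV_DATA_BUFFER_HDR_OFFSET(BufferSize) inside each slice. Here is a simplified, self-contained sketch of that carving; the structure fields, the DESCRIPTORS_PER_BLOCK value and the HDR_OFFSET() macro are stand-ins, and the driver's real header also carries the skb and list linkage set up by SXG_ALLOCATE_RCV_PACKET.

#include <stddef.h>
#include <stdint.h>

/* Stand-in for struct sxg_rcv_data_buffer_hdr; the real one holds more. */
struct rcv_data_buffer_hdr {
	void *virtual_address;		/* start of this slice */
	uint64_t physical_address;	/* DMA address of this slice */
	int state;			/* e.g. SXG_BUFFER_FREE */
};

#define DESCRIPTORS_PER_BLOCK	128	/* assumed value for illustration */
#define HDR_OFFSET(bufsize)	((bufsize) - sizeof(struct rcv_data_buffer_hdr))

/*
 * Carve one contiguous allocation into per-packet slices: packet data at
 * the front of each slice, bookkeeping header at HDR_OFFSET within it.
 */
static void carve_rcv_block(void *block, uint64_t paddr, size_t bufsize)
{
	unsigned char *slice = block;
	int i;

	for (i = 0; i < DESCRIPTORS_PER_BLOCK;
	     i++, slice += bufsize, paddr += bufsize) {
		struct rcv_data_buffer_hdr *hdr = (struct rcv_data_buffer_hdr *)
						  (slice + HDR_OFFSET(bufsize));

		hdr->virtual_address = slice;
		hdr->physical_address = paddr;
		hdr->state = 0;
	}
}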
...@@ -3328,8 +3416,10 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, ...@@ -3328,8 +3416,10 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
spin_lock(&adapter->SglQLock); spin_lock(&adapter->SglQLock);
adapter->AllSglBufferCount++; adapter->AllSglBufferCount++;
memset(SxgSgl, 0, sizeof(struct sxg_scatter_gather)); memset(SxgSgl, 0, sizeof(struct sxg_scatter_gather));
SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */ /* *PhysicalAddress; */
SxgSgl->adapter = adapter; /* Initialize backpointer once */ SxgSgl->PhysicalAddress = PhysicalAddress;
/* Initialize backpointer once */
SxgSgl->adapter = adapter;
InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
spin_unlock(&adapter->SglQLock); spin_unlock(&adapter->SglQLock);
SxgSgl->State = SXG_BUFFER_BUSY; SxgSgl->State = SXG_BUFFER_BUSY;
...@@ -3341,15 +3431,21 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, ...@@ -3341,15 +3431,21 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
static void sxg_adapter_set_hwaddr(struct adapter_t *adapter) static void sxg_adapter_set_hwaddr(struct adapter_t *adapter)
{ {
/* /*
* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); * funct#[%d]\n", __func__, card->config_set,
* sxg_dbg_macaddrs(adapter); * adapter->port, adapter->physport, adapter->functionnumber);
*/ *
* sxg_dbg_macaddrs(adapter);
*/
memcpy(adapter->macaddr, temp_mac_address,
sizeof(struct sxg_config_mac));
/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
* __FUNCTION__);
*/
/* sxg_dbg_macaddrs(adapter); */
memcpy(adapter->macaddr, temp_mac_address, sizeof(struct sxg_config_mac));
/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
/* sxg_dbg_macaddrs(adapter); */
if (!(adapter->currmacaddr[0] || if (!(adapter->currmacaddr[0] ||
adapter->currmacaddr[1] || adapter->currmacaddr[1] ||
adapter->currmacaddr[2] || adapter->currmacaddr[2] ||
...@@ -3361,7 +3457,7 @@ static void sxg_adapter_set_hwaddr(struct adapter_t *adapter) ...@@ -3361,7 +3457,7 @@ static void sxg_adapter_set_hwaddr(struct adapter_t *adapter)
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
} }
/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */ /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
sxg_dbg_macaddrs(adapter); sxg_dbg_macaddrs(adapter);
} }
...@@ -3514,8 +3610,7 @@ static int sxg_initialize_adapter(struct adapter_t *adapter) ...@@ -3514,8 +3610,7 @@ static int sxg_initialize_adapter(struct adapter_t *adapter)
* status * status
*/ */
static int sxg_fill_descriptor_block(struct adapter_t *adapter, static int sxg_fill_descriptor_block(struct adapter_t *adapter,
struct sxg_rcv_descriptor_block_hdr struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
*RcvDescriptorBlockHdr)
{ {
u32 i; u32 i;
struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo; struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
...@@ -3544,8 +3639,8 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter, ...@@ -3544,8 +3639,8 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr); RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
ASSERT(RingDescriptorCmd); ASSERT(RingDescriptorCmd);
RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD; RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
RcvDescriptorBlock = RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
(struct sxg_rcv_descriptor_block *) RcvDescriptorBlockHdr->VirtualAddress; RcvDescriptorBlockHdr->VirtualAddress;
/* Fill in the descriptor block */ /* Fill in the descriptor block */
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
...@@ -3555,11 +3650,13 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter, ...@@ -3555,11 +3650,13 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket); SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
RcvDataBufferHdr->State = SXG_BUFFER_ONCARD; RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
RcvDescriptorBlock->Descriptors[i].VirtualAddress = RcvDescriptorBlock->Descriptors[i].VirtualAddress =
(void *)RcvDataBufferHdr; (void *)RcvDataBufferHdr;
if (i == 0) if (i == 0)
printk("ASK:sxg_fill_descriptor_block: first virt address %p\n", RcvDataBufferHdr); printk("ASK:sxg_fill_descriptor_block: first virt \
address %p\n", RcvDataBufferHdr);
if (i == (SXG_RCV_DESCRIPTORS_PER_BLOCK - 1)) if (i == (SXG_RCV_DESCRIPTORS_PER_BLOCK - 1))
printk("ASK:sxg_fill_descriptor_block: last virt address %p\n", RcvDataBufferHdr); printk("ASK:sxg_fill_descriptor_block: last virt \
address %p\n", RcvDataBufferHdr);
RcvDescriptorBlock->Descriptors[i].PhysicalAddress = RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
RcvDataBufferHdr->PhysicalAddress; RcvDataBufferHdr->PhysicalAddress;
...@@ -3616,7 +3713,8 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter) ...@@ -3616,7 +3713,8 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
ReceiveBufferSize), ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV); SXG_BUFFER_TYPE_RCV);
} }
printk("ASK:sxg_stock_rcv_buffers: RcvBuffersOnCard %d\n", adapter->RcvBuffersOnCard); printk("ASK:sxg_stock_rcv_buffers: RcvBuffersOnCard %d\n",
adapter->RcvBuffersOnCard);
/* Now grab the RcvQLock lock and proceed */ /* Now grab the RcvQLock lock and proceed */
spin_lock(&adapter->RcvQLock); spin_lock(&adapter->RcvQLock);
while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
...@@ -3699,11 +3797,10 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter, ...@@ -3699,11 +3797,10 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
* header. The card will be restocked later via the * header. The card will be restocked later via the
* RcvBuffersOnCard test * RcvBuffersOnCard test
*/ */
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == if (sxg_fill_descriptor_block(adapter,
STATUS_FAILURE) { RcvDescriptorBlockHdr) == STATUS_FAILURE)
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
RcvDescriptorBlockHdr); RcvDescriptorBlockHdr);
}
} }
spin_unlock(&adapter->RcvQLock); spin_unlock(&adapter->RcvQLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
...@@ -3719,7 +3816,7 @@ static struct pci_driver sxg_driver = { ...@@ -3719,7 +3816,7 @@ static struct pci_driver sxg_driver = {
.suspend = sxgpm_suspend, .suspend = sxgpm_suspend,
.resume = sxgpm_resume, .resume = sxgpm_resume,
#endif #endif
/* .shutdown = slic_shutdown, MOOK_INVESTIGATE */ /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
}; };
static int __init sxg_module_init(void) static int __init sxg_module_init(void)
......
...@@ -62,13 +62,13 @@ struct sxg_stats { ...@@ -62,13 +62,13 @@ struct sxg_stats {
u64 DumbXmtUcastBytes; /* OID_GEN_DIRECTED_BYTES_XMIT */ u64 DumbXmtUcastBytes; /* OID_GEN_DIRECTED_BYTES_XMIT */
u64 DumbXmtMcastBytes; /* OID_GEN_MULTICAST_BYTES_XMIT */ u64 DumbXmtMcastBytes; /* OID_GEN_MULTICAST_BYTES_XMIT */
u64 DumbXmtBcastBytes; /* OID_GEN_BROADCAST_BYTES_XMIT */ u64 DumbXmtBcastBytes; /* OID_GEN_BROADCAST_BYTES_XMIT */
u64 XmtErrors; /* OID_GEN_XMIT_ERROR */ u64 XmtErrors; /* OID_GEN_XMIT_ERROR */
u64 XmtDiscards; /* OID_GEN_XMIT_DISCARDS */ u64 XmtDiscards; /* OID_GEN_XMIT_DISCARDS */
u64 XmtOk; /* OID_GEN_XMIT_OK */ u64 XmtOk; /* OID_GEN_XMIT_OK */
u64 XmtQLen; /* OID_GEN_TRANSMIT_QUEUE_LENGTH */ u64 XmtQLen; /* OID_GEN_TRANSMIT_QUEUE_LENGTH */
u64 XmtZeroFull; /* Transmit ring zero full */ u64 XmtZeroFull; /* Transmit ring zero full */
/* Rcv */ /* Rcv */
u32 RcvNBL; /* Offload receive NBL count */ u32 RcvNBL; /* Offload receive NBL count */
u64 DumbRcvBytes; /* dumbnic recv bytes */ u64 DumbRcvBytes; /* dumbnic recv bytes */
u64 DumbRcvUcastBytes; /* OID_GEN_DIRECTED_BYTES_RCV */ u64 DumbRcvUcastBytes; /* OID_GEN_DIRECTED_BYTES_RCV */
u64 DumbRcvMcastBytes; /* OID_GEN_MULTICAST_BYTES_RCV */ u64 DumbRcvMcastBytes; /* OID_GEN_MULTICAST_BYTES_RCV */
...@@ -116,14 +116,14 @@ struct sxg_stats { ...@@ -116,14 +116,14 @@ struct sxg_stats {
/* DUMB-NIC Send path definitions */ /* DUMB-NIC Send path definitions */
#define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb) { \ #define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb) { \
ASSERT(_skb); \ ASSERT(_skb); \
dev_kfree_skb_irq(_skb); \ dev_kfree_skb_irq(_skb); \
} }
#define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \ #define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \
ASSERT(_skb); \ ASSERT(_skb); \
dev_kfree_skb(_skb); \ dev_kfree_skb(_skb); \
} }
/* /*
...@@ -139,21 +139,21 @@ struct sxg_stats { ...@@ -139,21 +139,21 @@ struct sxg_stats {
/* Indications array size */ /* Indications array size */
#define SXG_RCV_ARRAYSIZE 64 #define SXG_RCV_ARRAYSIZE 64
#define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr) { \ #define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr) { \
struct sk_buff * skb; \ struct sk_buff * skb; \
skb = netdev_alloc_skb(_pAdapt->netdev, 2048); \ skb = netdev_alloc_skb(_pAdapt->netdev, 2048); \
if (skb) { \ if (skb) { \
(_RcvDataBufferHdr)->skb = skb; \ (_RcvDataBufferHdr)->skb = skb; \
skb->next = NULL; \ skb->next = NULL; \
} else { \ } else { \
(_RcvDataBufferHdr)->skb = NULL; \ (_RcvDataBufferHdr)->skb = NULL; \
} \ } \
} }
#define SXG_FREE_RCV_PACKET(_RcvDataBufferHdr) { \ #define SXG_FREE_RCV_PACKET(_RcvDataBufferHdr) { \
if((_RcvDataBufferHdr)->skb) { \ if((_RcvDataBufferHdr)->skb) { \
dev_kfree_skb((_RcvDataBufferHdr)->skb); \ dev_kfree_skb((_RcvDataBufferHdr)->skb); \
} \ } \
} }
/* /*
...@@ -161,54 +161,58 @@ struct sxg_stats { ...@@ -161,54 +161,58 @@ struct sxg_stats {
* If we fill up our array of packet pointers, then indicate this * If we fill up our array of packet pointers, then indicate this
* block up now and start on a new one. * block up now and start on a new one.
*/ */
#define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, _NumPackets) { \ #define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, \
(_IndicationList)[_NumPackets] = (_Packet); \ _NumPackets) { \
(_NumPackets)++; \ (_IndicationList)[_NumPackets] = (_Packet); \
if((_NumPackets) == SXG_RCV_ARRAYSIZE) { \ (_NumPackets)++; \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ if((_NumPackets) == SXG_RCV_ARRAYSIZE) { \
(_NumPackets), 0, 0, 0); \ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \
netif_rx((_IndicationList),(_NumPackets)); \ (_NumPackets), 0, 0, 0); \
(_NumPackets) = 0; \ netif_rx((_IndicationList),(_NumPackets)); \
} \ (_NumPackets) = 0; \
} \
} }
#define SXG_INDICATE_PACKETS(_pAdapt, _IndicationList, _NumPackets) { \ #define SXG_INDICATE_PACKETS(_pAdapt, _IndicationList, _NumPackets) { \
if(_NumPackets) { \ if(_NumPackets) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \
(_NumPackets), 0, 0, 0); \ (_NumPackets), 0, 0, 0); \
netif_rx((_IndicationList),(_NumPackets)); \ netif_rx((_IndicationList),(_NumPackets)); \
(_NumPackets) = 0; \ (_NumPackets) = 0; \
} \ } \
} }
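A sketch of how an event-processing loop is meant to drive these two macros: SXG_ADD_RCV_PACKET fills the indication array and flushes by itself once it reaches SXG_RCV_ARRAYSIZE, and SXG_INDICATE_PACKETS hands up whatever partial batch remains. The loop shape and the next_received_skb() helper below are placeholders; the real caller is sxg_process_event_queue() together with sxg_slow_receive().

/*
 * Placeholder usage sketch, not the driver's actual event loop.
 * next_received_skb() stands in for pulling the next completed receive
 * off the event ring via sxg_slow_receive().
 */
static void rcv_indication_sketch(struct adapter_t *adapter)
{
	struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
	u32 num_packets = 0;
	struct sk_buff *skb, *prev = NULL;

	while ((skb = next_received_skb(adapter)) != NULL) {
		/* Flushes automatically once the array holds SXG_RCV_ARRAYSIZE. */
		SXG_ADD_RCV_PACKET(adapter, skb, prev, IndicationList, num_packets);
		prev = skb;
	}
	/* Hand up any partial batch left at the end of the event walk. */
	SXG_INDICATE_PACKETS(adapter, IndicationList, num_packets);
}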
#define SXG_REINIATIALIZE_PACKET(_Packet) \ #define SXG_REINIATIALIZE_PACKET(_Packet) \
{} /*_NdisReinitializePacket(_Packet)*/ /* this is not necessary with an skb */ {} /*_NdisReinitializePacket(_Packet)*/
/* this is not necessary with an skb */
/* Definitions to initialize Dumb-nic Receive NBLs */ /* Definitions to initialize Dumb-nic Receive NBLs */
#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)((_Packet)->MiniportReservedEx))->RcvDataBufferHdr) #define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)\
((_Packet)->MiniportReservedEx))->RcvDataBufferHdr)
#define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \ #define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \
NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), TcpIpChecksumPacketInfo) = (PVOID)(_Cpi) NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), \
TcpIpChecksumPacketInfo) = (PVOID)(_Cpi)
#define SXG_RCV_SET_TOEPLITZ(_Packet, _Toeplitz, _Type, _Function) { \ #define SXG_RCV_SET_TOEPLITZ(_Packet, _Toeplitz, _Type, _Function) { \
NDIS_PACKET_SET_HASH_VALUE((_Packet), (_Toeplitz)); \ NDIS_PACKET_SET_HASH_VALUE((_Packet), (_Toeplitz)); \
NDIS_PACKET_SET_HASH_TYPE((_Packet), (_Type)); \ NDIS_PACKET_SET_HASH_TYPE((_Packet), (_Type)); \
NDIS_PACKET_SET_HASH_FUNCTION((_Packet), (_Function)); \ NDIS_PACKET_SET_HASH_FUNCTION((_Packet), (_Function)); \
} }
#define SXG_RCV_SET_VLAN_INFO(_Packet, _VlanId, _Priority) { \ #define SXG_RCV_SET_VLAN_INFO(_Packet, _VlanId, _Priority) { \
NDIS_PACKET_8021Q_INFO _Packet8021qInfo; \ NDIS_PACKET_8021Q_INFO _Packet8021qInfo; \
_Packet8021qInfo.TagHeader.VlanId = (_VlanId); \ _Packet8021qInfo.TagHeader.VlanId = (_VlanId); \
_Packet8021qInfo.TagHeader.UserPriority = (_Priority); \ _Packet8021qInfo.TagHeader.UserPriority = (_Priority); \
NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), Ieee8021QNetBufferListInfo) = \ NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), Ieee8021QNetBufferListInfo) = \
_Packet8021qInfo.Value; \ _Packet8021qInfo.Value; \
} }
#define SXG_ADJUST_RCV_PACKET(_Packet, _RcvDataBufferHdr, _Event) { \ #define SXG_ADJUST_RCV_PACKET(_Packet, _RcvDataBufferHdr, _Event) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbRcv", \ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbRcv", \
(_RcvDataBufferHdr), (_Packet), \ (_RcvDataBufferHdr), (_Packet), \
(_Event)->Status, 0); \ (_Event)->Status, 0); \
ASSERT((_Event)->Length <= (_RcvDataBufferHdr)->Size); \ ASSERT((_Event)->Length <= (_RcvDataBufferHdr)->Size); \
skb_put(Packet, (_Event)->Length); \ skb_put(Packet, (_Event)->Length); \
} }
...@@ -216,47 +220,49 @@ struct sxg_stats { ...@@ -216,47 +220,49 @@ struct sxg_stats {
* Macros to free a receive data buffer and receive data descriptor block * Macros to free a receive data buffer and receive data descriptor block
* NOTE - Lock must be held with RCV macros * NOTE - Lock must be held with RCV macros
*/ */
#define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ #define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \
struct list_entry *_ple; \ struct list_entry *_ple; \
_Hdr = NULL; \ _Hdr = NULL; \
if((_pAdapt)->FreeRcvBufferCount) { \ if((_pAdapt)->FreeRcvBufferCount) { \
ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \ ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \
_ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \ _ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \
(_Hdr) = container_of(_ple, struct sxg_rcv_data_buffer_hdr, FreeList); \ (_Hdr) = container_of(_ple, struct sxg_rcv_data_buffer_hdr, \
(_pAdapt)->FreeRcvBufferCount--; \ FreeList); \
ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \ (_pAdapt)->FreeRcvBufferCount--; \
} \ ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \
} \
} }
#define SXG_FREE_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ #define SXG_FREE_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RtnDHdr", \ SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RtnDHdr", \
(_Hdr), (_pAdapt)->FreeRcvBufferCount, \ (_Hdr), (_pAdapt)->FreeRcvBufferCount, \
(_Hdr)->State, (_Hdr)->VirtualAddress); \ (_Hdr)->State, (_Hdr)->VirtualAddress); \
/* SXG_RESTORE_MDL_OFFSET(_Hdr); */ \ /* SXG_RESTORE_MDL_OFFSET(_Hdr); */ \
(_pAdapt)->FreeRcvBufferCount++; \ (_pAdapt)->FreeRcvBufferCount++; \
ASSERT(((_pAdapt)->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK) >= (_pAdapt)->FreeRcvBufferCount); \ ASSERT(((_pAdapt)->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK) \
ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ >= (_pAdapt)->FreeRcvBufferCount); \
(_Hdr)->State = SXG_BUFFER_FREE; \ ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \
InsertTailList(&(_pAdapt)->FreeRcvBuffers, &((_Hdr)->FreeList)); \ (_Hdr)->State = SXG_BUFFER_FREE; \
InsertTailList(&(_pAdapt)->FreeRcvBuffers, &((_Hdr)->FreeList)); \
} }
#define SXG_FREE_RCV_DESCRIPTOR_BLOCK(_pAdapt, _Hdr) { \ #define SXG_FREE_RCV_DESCRIPTOR_BLOCK(_pAdapt, _Hdr) { \
ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \
(_Hdr)->State = SXG_BUFFER_FREE; \ (_Hdr)->State = SXG_BUFFER_FREE; \
(_pAdapt)->FreeRcvBlockCount++; \ (_pAdapt)->FreeRcvBlockCount++; \
ASSERT((_pAdapt)->AllRcvBlockCount >= (_pAdapt)->FreeRcvBlockCount); \ ASSERT((_pAdapt)->AllRcvBlockCount >= (_pAdapt)->FreeRcvBlockCount); \
InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \ InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \
} }
/* SGL macros */ /* SGL macros */
#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \ #define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \
spin_lock(&(_pAdapt)->SglQLock); \ spin_lock(&(_pAdapt)->SglQLock); \
(_pAdapt)->FreeSglBufferCount++; \ (_pAdapt)->FreeSglBufferCount++; \
ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount);\ ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount); \
ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \ ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \
(_Sgl)->State = SXG_BUFFER_FREE; \ (_Sgl)->State = SXG_BUFFER_FREE; \
InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \ InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \
spin_unlock(&(_pAdapt)->SglQLock); \ spin_unlock(&(_pAdapt)->SglQLock); \
} }
/* /*
...@@ -267,7 +273,7 @@ struct sxg_stats { ...@@ -267,7 +273,7 @@ struct sxg_stats {
* and not grabbing it avoids a possible double-trip. * and not grabbing it avoids a possible double-trip.
*/ */
#define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \ #define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \
struct list_entry *_ple; \ struct list_entry *_ple; \
if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \ if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \
(_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \ (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \
(_pAdapt->AllocationsPending == 0)) { \ (_pAdapt->AllocationsPending == 0)) { \
...@@ -280,7 +286,8 @@ struct sxg_stats { ...@@ -280,7 +286,8 @@ struct sxg_stats {
if((_pAdapt)->FreeSglBufferCount) { \ if((_pAdapt)->FreeSglBufferCount) { \
ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \ ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \
_ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \ _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \
(_Sgl) = container_of(_ple, struct sxg_scatter_gather, FreeList); \ (_Sgl) = container_of(_ple, struct sxg_scatter_gather, \
FreeList); \
(_pAdapt)->FreeSglBufferCount--; \ (_pAdapt)->FreeSglBufferCount--; \
ASSERT((_Sgl)->State == SXG_BUFFER_FREE); \ ASSERT((_Sgl)->State == SXG_BUFFER_FREE); \
(_Sgl)->State = SXG_BUFFER_BUSY; \ (_Sgl)->State = SXG_BUFFER_BUSY; \
...@@ -294,7 +301,7 @@ struct sxg_stats { ...@@ -294,7 +301,7 @@ struct sxg_stats {
* Linked list of multicast addresses. * Linked list of multicast addresses.
*/ */
struct sxg_multicast_address { struct sxg_multicast_address {
unsigned char Address[6]; unsigned char Address[6];
struct sxg_multicast_address *Next; struct sxg_multicast_address *Next;
}; };
...@@ -319,20 +326,20 @@ struct sxg_buffer_queue { ...@@ -319,20 +326,20 @@ struct sxg_buffer_queue {
#define SXG_FAST_SEND_BUFFER 1 #define SXG_FAST_SEND_BUFFER 1
#define SXG_RECEIVE_BUFFER 2 #define SXG_RECEIVE_BUFFER 2
#define SXG_INIT_BUFFER(_Buffer, _Type) { \ #define SXG_INIT_BUFFER(_Buffer, _Type) { \
(_Buffer)->Type = (_Type); \ (_Buffer)->Type = (_Type); \
if((_Type) == SXG_RECEIVE_BUFFER) { \ if((_Type) == SXG_RECEIVE_BUFFER) { \
(_Buffer)->Direction = 0; \ (_Buffer)->Direction = 0; \
} else { \ } else { \
(_Buffer)->Direction = NDIS_SG_LIST_WRITE_TO_DEVICE; \ (_Buffer)->Direction = NDIS_SG_LIST_WRITE_TO_DEVICE; \
} \ } \
(_Buffer)->Bytes = 0; \ (_Buffer)->Bytes = 0; \
(_Buffer)->Head = NULL; \ (_Buffer)->Head = NULL; \
(_Buffer)->Tail = NULL; \ (_Buffer)->Tail = NULL; \
} }
#define SXG_RSS_CPU_COUNT(_pAdapt) \ #define SXG_RSS_CPU_COUNT(_pAdapt) \
((_pAdapt)->RssEnabled ? NR_CPUS : 1) ((_pAdapt)->RssEnabled ? NR_CPUS : 1)
/* DRIVER and ADAPTER structures */ /* DRIVER and ADAPTER structures */
...@@ -367,9 +374,9 @@ enum SXG_LINK_STATE { ...@@ -367,9 +374,9 @@ enum SXG_LINK_STATE {
/* Microcode file selection codes */ /* Microcode file selection codes */
enum SXG_UCODE_SEL { enum SXG_UCODE_SEL {
SXG_UCODE_SAHARA, /* Sahara ucode */ SXG_UCODE_SAHARA, /* Sahara ucode */
SXG_UCODE_SDIAGCPU, /* Sahara CPU diagnostic ucode */ SXG_UCODE_SDIAGCPU, /* Sahara CPU diagnostic ucode */
SXG_UCODE_SDIAGSYS /* Sahara system diagnostic ucode */ SXG_UCODE_SDIAGSYS /* Sahara system diagnostic ucode */
}; };
...@@ -378,8 +385,9 @@ enum SXG_UCODE_SEL { ...@@ -378,8 +385,9 @@ enum SXG_UCODE_SEL {
/* This probably lives in a proto.h file. Move later */ /* This probably lives in a proto.h file. Move later */
#define SXG_MULTICAST_PACKET(_pether) ((_pether)->ether_dhost[0] & 0x01) #define SXG_MULTICAST_PACKET(_pether) ((_pether)->ether_dhost[0] & 0x01)
#define SXG_BROADCAST_PACKET(_pether) ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \ #define SXG_BROADCAST_PACKET(_pether) \
(*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF)) ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \
(*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF))
/* For DbgPrints */ /* For DbgPrints */
#define SXG_ID DPFLTR_IHVNETWORK_ID #define SXG_ID DPFLTR_IHVNETWORK_ID
...@@ -420,28 +428,28 @@ struct sxg_driver { ...@@ -420,28 +428,28 @@ struct sxg_driver {
* Mojave supports 16K, Oasis supports 16K-1, so * Mojave supports 16K, Oasis supports 16K-1, so
* just set this at 15K, shouldn't make that much of a diff. * just set this at 15K, shouldn't make that much of a diff.
*/ */
#define DUMP_BUF_SIZE 0x3C00 #define DUMP_BUF_SIZE 0x3C00
#endif #endif
#define MIN(a, b) ((u32)(a) < (u32)(b) ? (a) : (b)) #define MIN(a, b) ((u32)(a) < (u32)(b) ? (a) : (b))
#define MAX(a, b) ((u32)(a) > (u32)(b) ? (a) : (b)) #define MAX(a, b) ((u32)(a) > (u32)(b) ? (a) : (b))
struct mcast_address { struct mcast_address {
unsigned char address[6]; unsigned char address[6];
struct mcast_address *next; struct mcast_address *next;
}; };
#define CARD_DOWN 0x00000000 #define CARD_DOWN 0x00000000
#define CARD_UP 0x00000001 #define CARD_UP 0x00000001
#define CARD_FAIL 0x00000002 #define CARD_FAIL 0x00000002
#define CARD_DIAG 0x00000003 #define CARD_DIAG 0x00000003
#define CARD_SLEEP 0x00000004 #define CARD_SLEEP 0x00000004
#define ADAPT_DOWN 0x00 #define ADAPT_DOWN 0x00
#define ADAPT_UP 0x01 #define ADAPT_UP 0x01
#define ADAPT_FAIL 0x02 #define ADAPT_FAIL 0x02
#define ADAPT_RESET 0x03 #define ADAPT_RESET 0x03
#define ADAPT_SLEEP 0x04 #define ADAPT_SLEEP 0x04
#define ADAPT_FLAGS_BOOTTIME 0x0001 #define ADAPT_FLAGS_BOOTTIME 0x0001
#define ADAPT_FLAGS_IS64BIT 0x0002 #define ADAPT_FLAGS_IS64BIT 0x0002
...@@ -453,29 +461,30 @@ struct mcast_address { ...@@ -453,29 +461,30 @@ struct mcast_address {
#define ADAPT_FLAGS_STATS_TIMER_SET 0x0080 #define ADAPT_FLAGS_STATS_TIMER_SET 0x0080
#define ADAPT_FLAGS_RESET_TIMER_SET 0x0100 #define ADAPT_FLAGS_RESET_TIMER_SET 0x0100
#define LINK_DOWN 0x00 #define LINK_DOWN 0x00
#define LINK_CONFIG 0x01 #define LINK_CONFIG 0x01
#define LINK_UP 0x02 #define LINK_UP 0x02
#define LINK_10MB 0x00 #define LINK_10MB 0x00
#define LINK_100MB 0x01 #define LINK_100MB 0x01
#define LINK_AUTOSPEED 0x02 #define LINK_AUTOSPEED 0x02
#define LINK_1000MB 0x03 #define LINK_1000MB 0x03
#define LINK_10000MB 0x04 #define LINK_10000MB 0x04
#define LINK_HALFD 0x00 #define LINK_HALFD 0x00
#define LINK_FULLD 0x01 #define LINK_FULLD 0x01
#define LINK_AUTOD 0x02 #define LINK_AUTOD 0x02
#define MAC_DIRECTED 0x00000001 #define MAC_DIRECTED 0x00000001
#define MAC_BCAST 0x00000002 #define MAC_BCAST 0x00000002
#define MAC_MCAST 0x00000004 #define MAC_MCAST 0x00000004
#define MAC_PROMISC 0x00000008 #define MAC_PROMISC 0x00000008
#define MAC_LOOPBACK 0x00000010 #define MAC_LOOPBACK 0x00000010
#define MAC_ALLMCAST 0x00000020 #define MAC_ALLMCAST 0x00000020
#define SLIC_DUPLEX(x) ((x==LINK_FULLD) ? "FDX" : "HDX") #define SLIC_DUPLEX(x) ((x==LINK_FULLD) ? "FDX" : "HDX")
#define SLIC_SPEED(x) ((x==LINK_100MB) ? "100Mb" : ((x==LINK_1000MB) ? "1000Mb" : " 10Mb")) #define SLIC_SPEED(x) ((x==LINK_100MB) ? "100Mb" : \
((x==LINK_1000MB) ? "1000Mb" : " 10Mb"))
#define SLIC_LINKSTATE(x) ((x==LINK_DOWN) ? "Down" : "Up ") #define SLIC_LINKSTATE(x) ((x==LINK_DOWN) ? "Down" : "Up ")
#define SLIC_ADAPTER_STATE(x) ((x==ADAPT_UP) ? "UP" : "Down") #define SLIC_ADAPTER_STATE(x) ((x==ADAPT_UP) ? "UP" : "Down")
#define SLIC_CARD_STATE(x) ((x==CARD_UP) ? "UP" : "Down") #define SLIC_CARD_STATE(x) ((x==CARD_UP) ? "UP" : "Down")
...@@ -492,8 +501,8 @@ struct ether_header { ...@@ -492,8 +501,8 @@ struct ether_header {
#define NUM_CFG_REGS 64 #define NUM_CFG_REGS 64
struct physcard { struct physcard {
struct adapter_t *adapter[SLIC_MAX_PORTS]; struct adapter_t *adapter[SLIC_MAX_PORTS];
struct physcard *next; struct physcard *next;
unsigned int adapters_allocd; unsigned int adapters_allocd;
}; };
...@@ -687,7 +696,6 @@ struct adapter_t { ...@@ -687,7 +696,6 @@ struct adapter_t {
/* PSXG_DUMP_CMD DumpBuffer; */ /* 68k - Cmd and Buffer */ /* PSXG_DUMP_CMD DumpBuffer; */ /* 68k - Cmd and Buffer */
/* dma_addr_t PDumpBuffer; */ /* Physical address */ /* dma_addr_t PDumpBuffer; */ /* Physical address */
/*#endif */ /* SXG_FAILURE_DUMP */ /*#endif */ /* SXG_FAILURE_DUMP */
}; };
#if SLIC_DUMP_ENABLED #if SLIC_DUMP_ENABLED
...@@ -721,13 +729,13 @@ struct slic_crash_info { ...@@ -721,13 +729,13 @@ struct slic_crash_info {
(largestat) += ((newstat) - (oldstat)); \ (largestat) += ((newstat) - (oldstat)); \
} }
#define ETHER_EQ_ADDR(_AddrA, _AddrB, _Result) \ #define ETHER_EQ_ADDR(_AddrA, _AddrB, _Result) \
{ \ { \
_Result = TRUE; \ _Result = TRUE; \
if (*(u32 *)(_AddrA) != *(u32 *)(_AddrB)) \ if (*(u32 *)(_AddrA) != *(u32 *)(_AddrB)) \
_Result = FALSE; \ _Result = FALSE; \
if (*(u16 *)(&((_AddrA)[4])) != *(u16 *)(&((_AddrB)[4]))) \ if (*(u16 *)(&((_AddrA)[4])) != *(u16 *)(&((_AddrB)[4]))) \
_Result = FALSE; \ _Result = FALSE; \
} }
#define ETHERMAXFRAME 1514 #define ETHERMAXFRAME 1514
...@@ -735,7 +743,8 @@ struct slic_crash_info { ...@@ -735,7 +743,8 @@ struct slic_crash_info {
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64) #if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
#define SXG_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & 0x00000000FFFFFFFF) #define SXG_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & 0x00000000FFFFFFFF)
#define SXG_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & 0x00000000FFFFFFFF) #define SXG_GET_ADDR_HIGH(_addr) \
(u32)(((u64)(_addr) >> 32) & 0x00000000FFFFFFFF)
#else #else
#define SXG_GET_ADDR_LOW(_addr) (u32)_addr #define SXG_GET_ADDR_LOW(_addr) (u32)_addr
#define SXG_GET_ADDR_HIGH(_addr) (u32)0 #define SXG_GET_ADDR_HIGH(_addr) (u32)0
...@@ -744,8 +753,8 @@ struct slic_crash_info { ...@@ -744,8 +753,8 @@ struct slic_crash_info {
#define FLUSH TRUE #define FLUSH TRUE
#define DONT_FLUSH FALSE #define DONT_FLUSH FALSE
#define SIOCSLICDUMPCARD SIOCDEVPRIVATE+9 #define SIOCSLICDUMPCARD (SIOCDEVPRIVATE+9)
#define SIOCSLICSETINTAGG SIOCDEVPRIVATE+10 #define SIOCSLICSETINTAGG (SIOCDEVPRIVATE+10)
#define SIOCSLICTRACEDUMP SIOCDEVPRIVATE+11 #define SIOCSLICTRACEDUMP (SIOCDEVPRIVATE+11)
#endif /* __SXG_DRIVER_H__ */ #endif /* __SXG_DRIVER_H__ */
...@@ -49,26 +49,26 @@ struct list_entry { ...@@ -49,26 +49,26 @@ struct list_entry {
struct list_entry *nle_blink; struct list_entry *nle_blink;
}; };
#define InitializeListHead(l) \ #define InitializeListHead(l) \
(l)->nle_flink = (l)->nle_blink = (l) (l)->nle_flink = (l)->nle_blink = (l)
#define IsListEmpty(h) \ #define IsListEmpty(h) \
((h)->nle_flink == (h)) ((h)->nle_flink == (h))
#define RemoveEntryList(e) \ #define RemoveEntryList(e) \
do { \ do { \
struct list_entry *b; \ struct list_entry *b; \
struct list_entry *f; \ struct list_entry *f; \
\ \
f = (e)->nle_flink; \ f = (e)->nle_flink; \
b = (e)->nle_blink; \ b = (e)->nle_blink; \
b->nle_flink = f; \ b->nle_flink = f; \
f->nle_blink = b; \ f->nle_blink = b; \
} while (0) } while (0)
/* These two have to be inlined since they return things. */ /* These two have to be inlined since they return things. */
static __inline struct list_entry *RemoveHeadList(struct list_entry *l) static inline struct list_entry *RemoveHeadList(struct list_entry *l)
{ {
struct list_entry *f; struct list_entry *f;
struct list_entry *e; struct list_entry *e;
...@@ -81,7 +81,7 @@ static __inline struct list_entry *RemoveHeadList(struct list_entry *l) ...@@ -81,7 +81,7 @@ static __inline struct list_entry *RemoveHeadList(struct list_entry *l)
return (e); return (e);
} }
static __inline struct list_entry *RemoveTailList(struct list_entry *l) static inline struct list_entry *RemoveTailList(struct list_entry *l)
{ {
struct list_entry *b; struct list_entry *b;
struct list_entry *e; struct list_entry *e;
...@@ -94,35 +94,35 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l) ...@@ -94,35 +94,35 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l)
return (e); return (e);
} }
#define InsertTailList(l, e) \ #define InsertTailList(l, e) \
do { \ do { \
struct list_entry *b; \ struct list_entry *b; \
\ \
b = (l)->nle_blink; \ b = (l)->nle_blink; \
(e)->nle_flink = (l); \ (e)->nle_flink = (l); \
(e)->nle_blink = b; \ (e)->nle_blink = b; \
b->nle_flink = (e); \ b->nle_flink = (e); \
(l)->nle_blink = (e); \ (l)->nle_blink = (e); \
} while (0) } while (0)
#define InsertHeadList(l, e) \ #define InsertHeadList(l, e) \
do { \ do { \
struct list_entry *f; \ struct list_entry *f; \
\ \
f = (l)->nle_flink; \ f = (l)->nle_flink; \
(e)->nle_flink = f; \ (e)->nle_flink = f; \
(e)->nle_blink = l; \ (e)->nle_blink = l; \
f->nle_blink = (e); \ f->nle_blink = (e); \
(l)->nle_flink = (e); \ (l)->nle_flink = (e); \
} while (0) } while (0)
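These Windows-style helpers are used throughout the driver with an embedded struct list_entry member and container_of() to recover the outer structure, exactly as SXG_GET_RCV_DATA_BUFFER and SXG_GET_SGL_BUFFER do above. A small self-contained illustration follows; struct my_item and its fields are invented for the example.

struct my_item {
	int value;
	struct list_entry list;		/* embedded link, like FreeList above */
};

static void list_entry_sketch(void)
{
	struct list_entry head;
	struct my_item a = { .value = 1 }, b = { .value = 2 };
	struct list_entry *ple;
	struct my_item *item;

	InitializeListHead(&head);
	InsertTailList(&head, &a.list);
	InsertTailList(&head, &b.list);

	while (!IsListEmpty(&head)) {
		ple = RemoveHeadList(&head);
		item = container_of(ple, struct my_item, list);
		/* item->value is 1 on the first pass, 2 on the second */
		(void)item;
	}
}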
#define ATK_DEBUG 1 #define ATK_DEBUG 1
#if ATK_DEBUG #if ATK_DEBUG
#define SLIC_TIMESTAMP(value) { \ #define SLIC_TIMESTAMP(value) { \
struct timeval timev; \ struct timeval timev; \
do_gettimeofday(&timev); \ do_gettimeofday(&timev); \
value = timev.tv_sec*1000000 + timev.tv_usec; \ value = timev.tv_sec*1000000 + timev.tv_usec; \
} }
#else #else
#define SLIC_TIMESTAMP(value) #define SLIC_TIMESTAMP(value)
...@@ -131,17 +131,19 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l) ...@@ -131,17 +131,19 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l)
/* SXG DEFINES */ /* SXG DEFINES */
#ifdef ATKDBG #ifdef ATKDBG
#define SXG_TIMESTAMP(value) { \ #define SXG_TIMESTAMP(value) { \
struct timeval timev; \ struct timeval timev; \
do_gettimeofday(&timev); \ do_gettimeofday(&timev); \
value = timev.tv_sec*1000000 + timev.tv_usec; \ value = timev.tv_sec*1000000 + timev.tv_usec; \
} }
#else #else
#define SXG_TIMESTAMP(value) #define SXG_TIMESTAMP(value)
#endif #endif
#define WRITE_REG(reg,value,flush) sxg_reg32_write((&reg), (value), (flush)) #define WRITE_REG(reg,value,flush) \
#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu)) sxg_reg32_write((&reg), (value), (flush))
#define WRITE_REG64(a,reg,value,cpu) \
sxg_reg64_write((a),(&reg),(value),(cpu))
#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg)) #define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg))
#endif /* _SLIC_OS_SPECIFIC_H_ */ #endif /* _SLIC_OS_SPECIFIC_H_ */
...@@ -55,12 +55,12 @@ ...@@ -55,12 +55,12 @@
#define SXG_ASSERT_ENABLED #define SXG_ASSERT_ENABLED
#ifdef SXG_ASSERT_ENABLED #ifdef SXG_ASSERT_ENABLED
#ifndef ASSERT #ifndef ASSERT
#define ASSERT(a) \ #define ASSERT(a) \
{ \ { \
if (!(a)) { \ if (!(a)) { \
DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\ DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n", \
__FILE__, __func__, __LINE__); \ __FILE__, __func__, __LINE__); \
} \ } \
} }
#endif #endif
#else #else
...@@ -88,16 +88,17 @@ extern ulong ATKTimerDiv; ...@@ -88,16 +88,17 @@ extern ulong ATKTimerDiv;
* parameters. * parameters.
*/ */
struct trace_entry { struct trace_entry {
char name[8]; /* 8 character name - like 's'i'm'b'a'r'c'v' */ char name[8];/* 8 character name - like 's'i'm'b'a'r'c'v' */
u32 time; /* Current clock tic */ u32 time; /* Current clock tic */
unsigned char cpu; /* Current CPU */ unsigned char cpu; /* Current CPU */
unsigned char irql; /* Current IRQL */ unsigned char irql; /* Current IRQL */
unsigned char driver; /* The driver which added the trace call */ unsigned char driver;/* The driver which added the trace call */
unsigned char pad2; /* pad to 4 byte boundary - will probably get used */ /* pad to 4 byte boundary - will probably get used */
u32 arg1; /* Caller arg1 */ unsigned char pad2;
u32 arg2; /* Caller arg2 */ u32 arg1; /* Caller arg1 */
u32 arg3; /* Caller arg3 */ u32 arg2; /* Caller arg2 */
u32 arg4; /* Caller arg4 */ u32 arg3; /* Caller arg3 */
u32 arg4; /* Caller arg4 */
}; };
/* Driver types for driver field in struct trace_entry */ /* Driver types for driver field in struct trace_entry */
...@@ -108,11 +109,12 @@ struct trace_entry { ...@@ -108,11 +109,12 @@ struct trace_entry {
#define TRACE_ENTRIES 1024 #define TRACE_ENTRIES 1024
struct sxg_trace_buffer { struct sxg_trace_buffer {
unsigned int size; /* aid for windbg extension */ /* aid for windbg extension */
unsigned int in; /* Where to add */ unsigned int size;
unsigned int level; /* Current Trace level */ unsigned int in; /* Where to add */
spinlock_t lock; /* For MP tracing */ unsigned int level; /* Current Trace level */
struct trace_entry entries[TRACE_ENTRIES];/* The circular buffer */ spinlock_t lock; /* For MP tracing */
struct trace_entry entries[TRACE_ENTRIES];/* The circular buffer */
}; };
/* /*
...@@ -143,22 +145,22 @@ struct sxg_trace_buffer { ...@@ -143,22 +145,22 @@ struct sxg_trace_buffer {
/* The trace macro. This is active only if ATK_TRACE_ENABLED is set. */ /* The trace macro. This is active only if ATK_TRACE_ENABLED is set. */
#if ATK_TRACE_ENABLED #if ATK_TRACE_ENABLED
#define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) { \ #define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) { \
if ((buffer) && ((buffer)->level >= (tlevel))) { \ if ((buffer) && ((buffer)->level >= (tlevel))) { \
unsigned int trace_irql = 0; /* ?????? FIX THIS */ \ unsigned int trace_irql = 0;/* ?????? FIX THIS */\
unsigned int trace_len; \ unsigned int trace_len; \
struct trace_entry *trace_entry; \ struct trace_entry *trace_entry; \
struct timeval timev; \ struct timeval timev; \
\ \
spin_lock(&(buffer)->lock); \ spin_lock(&(buffer)->lock); \
trace_entry = &(buffer)->entries[(buffer)->in]; \ trace_entry = &(buffer)->entries[(buffer)->in]; \
do_gettimeofday(&timev); \ do_gettimeofday(&timev); \
\ \
memset(trace_entry->name, 0, 8); \ memset(trace_entry->name, 0, 8); \
trace_len = strlen(tname); \ trace_len = strlen(tname); \
trace_len = trace_len > 8 ? 8 : trace_len; \ trace_len = trace_len > 8 ? 8 : trace_len; \
memcpy(trace_entry->name, (tname), trace_len); \ memcpy(trace_entry->name, (tname), trace_len); \
trace_entry->time = timev.tv_usec; \ trace_entry->time = timev.tv_usec; \
trace_entry->cpu = (unsigned char)(smp_processor_id() & 0xFF); \ trace_entry->cpu = (unsigned char)(smp_processor_id() & 0xFF);\
trace_entry->driver = (tdriver); \ trace_entry->driver = (tdriver); \
trace_entry->irql = trace_irql; \ trace_entry->irql = trace_irql; \
trace_entry->arg1 = (ulong)(a1); \ trace_entry->arg1 = (ulong)(a1); \
......
...@@ -12,82 +12,82 @@ ...@@ -12,82 +12,82 @@
/* UCODE Registers */ /* UCODE Registers */
struct sxg_ucode_regs { struct sxg_ucode_regs {
/* Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 */ /* Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 */
u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */ u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */
u32 RsvdReg1; /* Code = 1 - TOE -NA */ u32 RsvdReg1; /* Code = 1 - TOE -NA */
u32 RsvdReg2; /* Code = 2 - TOE -NA */ u32 RsvdReg2; /* Code = 2 - TOE -NA */
u32 RsvdReg3; /* Code = 3 - TOE -NA */ u32 RsvdReg3; /* Code = 3 - TOE -NA */
u32 RsvdReg4; /* Code = 4 - TOE -NA */ u32 RsvdReg4; /* Code = 4 - TOE -NA */
u32 RsvdReg5; /* Code = 5 - TOE -NA */ u32 RsvdReg5; /* Code = 5 - TOE -NA */
u32 CardUp; /* Code = 6 - Microcode initialized when 1 */ u32 CardUp; /* Code = 6 - Microcode initialized when 1 */
u32 RsvdReg7; /* Code = 7 - TOE -NA */ u32 RsvdReg7; /* Code = 7 - TOE -NA */
u32 ConfigStat; /* Code = 8 - Configuration data load status */ u32 ConfigStat; /* Code = 8 - Configuration data load status */
u32 RsvdReg9; /* Code = 9 - TOE -NA */ u32 RsvdReg9; /* Code = 9 - TOE -NA */
u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */ u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */
/* This brings us to ExCode 1 at address 0x40 = Interrupt status pointer */ /* This brings us to ExCode 1 at address 0x40 = Interrupt status pointer */
u32 Isp; /* Code = 0 (extended), ExCode = 1 */ u32 Isp; /* Code = 0 (extended), ExCode = 1 */
u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */ u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */
/* ExCode 2 = Interrupt Status Register */ /* ExCode 2 = Interrupt Status Register */
u32 Isr; /* Code = 0 (extended), ExCode = 2 */ u32 Isr; /* Code = 0 (extended), ExCode = 2 */
u32 PadEx2[15]; u32 PadEx2[15];
/* ExCode 3 = Event base register. Location of event rings */ /* ExCode 3 = Event base register. Location of event rings */
u32 EventBase; /* Code = 0 (extended), ExCode = 3 */ u32 EventBase; /* Code = 0 (extended), ExCode = 3 */
u32 PadEx3[15]; u32 PadEx3[15];
/* ExCode 4 = Event ring size */ /* ExCode 4 = Event ring size */
u32 EventSize; /* Code = 0 (extended), ExCode = 4 */ u32 EventSize; /* Code = 0 (extended), ExCode = 4 */
u32 PadEx4[15]; u32 PadEx4[15];
/* ExCode 5 = TCB Buffers base address */ /* ExCode 5 = TCB Buffers base address */
u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */ u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */
u32 PadEx5[15]; u32 PadEx5[15];
/* ExCode 6 = TCB Composite Buffers base address */ /* ExCode 6 = TCB Composite Buffers base address */
u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */ u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */
u32 PadEx6[15]; u32 PadEx6[15];
/* ExCode 7 = Transmit ring base address */ /* ExCode 7 = Transmit ring base address */
u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */ u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */
u32 PadEx7[15]; u32 PadEx7[15];
/* ExCode 8 = Transmit ring size */ /* ExCode 8 = Transmit ring size */
u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */ u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */
u32 PadEx8[15]; u32 PadEx8[15];
/* ExCode 9 = Receive ring base address */ /* ExCode 9 = Receive ring base address */
u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */ u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */
u32 PadEx9[15]; u32 PadEx9[15];
/* ExCode 10 = Receive ring size */ /* ExCode 10 = Receive ring size */
u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */ u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */
u32 PadEx10[15]; u32 PadEx10[15];
/* ExCode 11 = Read EEPROM/Flash Config */ /* ExCode 11 = Read EEPROM/Flash Config */
u32 Config; /* Code = 0 (extended), ExCode = 11 */ u32 Config; /* Code = 0 (extended), ExCode = 11 */
u32 PadEx11[15]; u32 PadEx11[15];
/* ExCode 12 = Multicast bits 31:0 */ /* ExCode 12 = Multicast bits 31:0 */
u32 McastLow; /* Code = 0 (extended), ExCode = 12 */ u32 McastLow; /* Code = 0 (extended), ExCode = 12 */
u32 PadEx12[15]; u32 PadEx12[15];
/* ExCode 13 = Multicast bits 63:32 */ /* ExCode 13 = Multicast bits 63:32 */
u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */ u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */
u32 PadEx13[15]; u32 PadEx13[15];
/* ExCode 14 = Ping */ /* ExCode 14 = Ping */
u32 Ping; /* Code = 0 (extended), ExCode = 14 */ u32 Ping; /* Code = 0 (extended), ExCode = 14 */
u32 PadEx14[15]; u32 PadEx14[15];
/* ExCode 15 = Link MTU */ /* ExCode 15 = Link MTU */
u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */ u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */
u32 PadEx15[15]; u32 PadEx15[15];
/* ExCode 16 = Download synchronization */ /* ExCode 16 = Download synchronization */
u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */ u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */
u32 PadEx16[15]; u32 PadEx16[15];
/* ExCode 17 = Upper DRAM address bits on 32-bit systems */ /* ExCode 17 = Upper DRAM address bits on 32-bit systems */
u32 Upper; /* Code = 0 (extended), ExCode = 17 */ u32 Upper; /* Code = 0 (extended), ExCode = 17 */
u32 PadEx17[15]; u32 PadEx17[15];
/* ExCode 18 = Slowpath Send Index Address */ /* ExCode 18 = Slowpath Send Index Address */
u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */ u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */
u32 PadEx18[15]; u32 PadEx18[15];
/* ExCode 19 = Get ucode statistics */ /* ExCode 19 = Get ucode statistics */
u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */ u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */
u32 PadEx19[15]; u32 PadEx19[15];
/* ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation */ /* ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation */
u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */ u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */
u32 PadEx20[15]; u32 PadEx20[15];
/* ExCode 21 = Receive MDL push timer */ /* ExCode 21 = Receive MDL push timer */
u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */ u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */
u32 PadEx21[15]; u32 PadEx21[15];
/* ExCode 22 = ACK Frequency */ /* ExCode 22 = ACK Frequency */
u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */ u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */
u32 PadEx22[15]; u32 PadEx22[15];
/* ExCode 23 = TOE NA */ /* ExCode 23 = TOE NA */
u32 RsvdReg23; u32 RsvdReg23;
...@@ -96,31 +96,31 @@ struct sxg_ucode_regs { ...@@ -96,31 +96,31 @@ struct sxg_ucode_regs {
u32 RsvdReg24; u32 RsvdReg24;
u32 PadEx24[15]; u32 PadEx24[15];
/* ExCode 25 = TOE NA */ /* ExCode 25 = TOE NA */
u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */ u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */
u32 PadEx25[15]; u32 PadEx25[15];
/* ExCode 26 = Receive checksum requirements */ /* ExCode 26 = Receive checksum requirements */
u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */ u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */
u32 PadEx26[15]; u32 PadEx26[15];
/* ExCode 27 = RSS Requirements */ /* ExCode 27 = RSS Requirements */
u32 Rss; /* Code = 0 (extended), ExCode = 27 */ u32 Rss; /* Code = 0 (extended), ExCode = 27 */
u32 PadEx27[15]; u32 PadEx27[15];
/* ExCode 28 = RSS Table */ /* ExCode 28 = RSS Table */
u32 RssTable; /* Code = 0 (extended), ExCode = 28 */ u32 RssTable; /* Code = 0 (extended), ExCode = 28 */
u32 PadEx28[15]; u32 PadEx28[15];
/* ExCode 29 = Event ring release entries */ /* ExCode 29 = Event ring release entries */
u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */ u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */
u32 PadEx29[15]; u32 PadEx29[15];
/* ExCode 30 = Number of receive bufferlist commands on ring 0 */ /* ExCode 30 = Number of receive bufferlist commands on ring 0 */
u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */ u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */
u32 PadEx30[15]; u32 PadEx30[15];
/* ExCode 31 = slowpath transmit command - Data[31:0] = 1 */ /* ExCode 31 = slowpath transmit command - Data[31:0] = 1 */
u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */ u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */
u32 PadEx31[15]; u32 PadEx31[15];
/* ExCode 32 = Dump command */ /* ExCode 32 = Dump command */
u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */ u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */
u32 PadEx32[15]; u32 PadEx32[15];
/* ExCode 33 = Debug command */ /* ExCode 33 = Debug command */
u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */ u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */
u32 PadEx33[15]; u32 PadEx33[15];
/* /*
* There are 128 possible extended commands - each of which accounts for 16 * There are 128 possible extended commands - each of which accounts for 16
...@@ -129,7 +129,7 @@ struct sxg_ucode_regs { ...@@ -129,7 +129,7 @@ struct sxg_ucode_regs {
* base. As extended codes are added, reduce the first array value in * base. As extended codes are added, reduce the first array value in
* the following field * the following field
*/ */
u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33) */ u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33)*/
}; };
/* Interrupt control register (0) values */ /* Interrupt control register (0) values */
...@@ -142,10 +142,11 @@ struct sxg_ucode_regs { ...@@ -142,10 +142,11 @@ struct sxg_ucode_regs {
((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \ ((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \
SXG_ICR_MSGID_MASK) | (_Data)) SXG_ICR_MSGID_MASK) | (_Data))
#define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */ #define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */
#define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */ #define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */
#define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */ #define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */
#define SXG_AGG_XMT_DISABLE 0x80000000 /* Disable interrupt aggregation on xmt */ /* Disable interrupt aggregation on xmt */
#define SXG_AGG_XMT_DISABLE 0x80000000
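The comments above only fix the maximum aggregation value to the top 16 bits of the value written through the Aggregation ucode register (ExCode 20). A hedged sketch of the packing follows; placing the minimum in the low 16 bits is an assumption, and the authoritative code is sxgmisc.c:SxgSetInterruptAggregation(), which is not in this diff.

/*
 * ASSUMPTION: minimum in bits 15:0, maximum in bits 31:16 (only the
 * latter is stated by the comments above), optional transmit-disable
 * flag set via SXG_AGG_XMT_DISABLE.
 */
static u32 build_aggregation_value(u32 min_agg, u32 max_agg, int disable_xmt)
{
	u32 value = (max_agg << SXG_MAX_AGG_SHIFT) | (min_agg & 0xFFFF);

	if (disable_xmt)
		value |= SXG_AGG_XMT_DISABLE;
	return value;
}

/* e.g. build_aggregation_value(SXG_MIN_AGG_DEFAULT, SXG_MAX_AGG_DEFAULT, 0)
 * yields 0x00400010.
 */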
/* The Microcode supports up to 8 RSS queues */ /* The Microcode supports up to 8 RSS queues */
#define SXG_MAX_RSS 8 #define SXG_MAX_RSS 8
...@@ -170,11 +171,11 @@ struct sxg_ucode_regs { ...@@ -170,11 +171,11 @@ struct sxg_ucode_regs {
* Status returned by ucode in the ConfigStat reg (see above) when attempted * Status returned by ucode in the ConfigStat reg (see above) when attempted
* to load configuration data from the EEPROM/Flash. * to load configuration data from the EEPROM/Flash.
*/ */
#define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */ #define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */
#define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */ #define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */
#define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */ #define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */
#define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */ #define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */
#define SXG_CFG_LOAD_ERROR 5 /* hardware error */ #define SXG_CFG_LOAD_ERROR 5 /* hardware error */
#define SXG_CHECK_FOR_HANG_TIME 5 #define SXG_CHECK_FOR_HANG_TIME 5
...@@ -186,17 +187,17 @@ struct sxg_ucode_regs { ...@@ -186,17 +187,17 @@ struct sxg_ucode_regs {
* struct sxg_ucode_regs definition above * struct sxg_ucode_regs definition above
*/ */
struct sxg_tcb_regs { struct sxg_tcb_regs {
u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
u32 Rsvd1; /* Code = 3 - TOE NA */ u32 Rsvd1; /* Code = 3 - TOE NA */
u32 Rsvd2; /* Code = 4 - TOE NA */ u32 Rsvd2; /* Code = 4 - TOE NA */
u32 Rsvd3; /* Code = 5 - TOE NA */ u32 Rsvd3; /* Code = 5 - TOE NA */
u32 Invalid1; /* Code = 6 - Reserved for "CardUp" see above */ u32 Invalid1; /* Code = 6 - Reserved for "CardUp" see above */
u32 Rsvd4; /* Code = 7 - TOE NA */ u32 Rsvd4; /* Code = 7 - TOE NA */
u32 Invalid2; /* Code = 8 - Reserved for "ConfigStat" see above */ u32 Invalid2; /* Code = 8 - Reserved for "ConfigStat" see above */
u32 Rsvd5; /* Code = 9 - TOE NA */ u32 Rsvd5; /* Code = 9 - TOE NA */
u32 Pad[6]; /* Codes 10-15 - Not used. */ u32 Pad[6]; /* Codes 10-15 - Not used. */
}; };
/*************************************************************************** /***************************************************************************
...@@ -226,7 +227,7 @@ struct sxg_tcb_regs { ...@@ -226,7 +227,7 @@ struct sxg_tcb_regs {
#define SXG_ISR_ERR 0x80000000 /* Error */ #define SXG_ISR_ERR 0x80000000 /* Error */
#define SXG_ISR_EVENT 0x40000000 /* Event ring event */ #define SXG_ISR_EVENT 0x40000000 /* Event ring event */
#define SXG_ISR_NONE1 0x20000000 /* Not used */ #define SXG_ISR_NONE1 0x20000000 /* Not used */
#define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete */ #define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete*/
#define SXG_ISR_LINK 0x08000000 /* Link event */ #define SXG_ISR_LINK 0x08000000 /* Link event */
#define SXG_ISR_PDQF 0x04000000 /* Processed data queue full */ #define SXG_ISR_PDQF 0x04000000 /* Processed data queue full */
#define SXG_ISR_RMISS 0x02000000 /* Drop - no host buf */ #define SXG_ISR_RMISS 0x02000000 /* Drop - no host buf */
...@@ -335,7 +336,8 @@ struct sxg_event { ...@@ -335,7 +336,8 @@ struct sxg_event {
*/ */
#define EVENT_RING_SIZE 4096 #define EVENT_RING_SIZE 4096
#define EVENT_RING_BATCH 16 /* Hand entries back 16 at a time. */ #define EVENT_RING_BATCH 16 /* Hand entries back 16 at a time. */
#define EVENT_BATCH_LIMIT 256 /* Stop processing events after 4096 (256 * 16) */ /* Stop processing events after 4096 (256 * 16) */
#define EVENT_BATCH_LIMIT 256
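A self-contained toy model of the batching implied by EVENT_RING_BATCH and EVENT_BATCH_LIMIT: hand entries back 16 at a time and stop after 256 batches (4096 events). The counters and loop shape are illustrative assumptions, not the driver's actual event-processing code.

#include <stdio.h>

#define EVENT_RING_BATCH	16
#define EVENT_BATCH_LIMIT	256

int main(void)
{
	unsigned int pending = 5000;	/* events waiting in the ring (toy value) */
	unsigned int returned = 0;	/* entries handed back to the card */
	unsigned int batches = 0, in_batch = 0, handled = 0;

	while (batches < EVENT_BATCH_LIMIT && pending) {
		pending--;			/* "process" one event */
		handled++;
		if (++in_batch == EVENT_RING_BATCH) {
			returned += in_batch;	/* hand 16 back at once */
			in_batch = 0;
			batches++;
		}
	}
	returned += in_batch;			/* hand back any remainder */

	/* prints: handled 4096 events, 904 still pending */
	printf("handled %u events, %u still pending\n", handled, pending);
	return 0;
}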
struct sxg_event_ring { struct sxg_event_ring {
struct sxg_event Ring[EVENT_RING_SIZE]; struct sxg_event Ring[EVENT_RING_SIZE];
...@@ -352,34 +354,34 @@ struct sxg_event_ring { ...@@ -352,34 +354,34 @@ struct sxg_event_ring {
* offloaded connections, 10:4 if we support 2k and so on. * offloaded connections, 10:4 if we support 2k and so on.
*/ */
#define SXG_TCB_BUCKET_SHIFT 4 #define SXG_TCB_BUCKET_SHIFT 4
#define SXG_TCB_PER_BUCKET 16 #define SXG_TCB_PER_BUCKET 16
#define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */ #define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */
#define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */ #define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */
#define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */ #define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */
#define SXG_TCB_BUFFER_SIZE 512 /* ASSERT format is correct */ #define SXG_TCB_BUFFER_SIZE 512 /* ASSERT format is correct */
#define SXG_TCB_RCVQ_SIZE 736 #define SXG_TCB_RCVQ_SIZE 736
#define SXG_TCB_COMPOSITE_BUFFER_SIZE 1024 #define SXG_TCB_COMPOSITE_BUFFER_SIZE 1024
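A minimal sketch of the bucket scheme described above, assuming the masks are applied directly to a TCB ID to split it into a bucket index and an element within the bucket.

#include <stdio.h>

#define SXG_TCB_BUCKET_SHIFT	4
#define SXG_TCB_BUCKET_MASK	0xFF0	/* Bucket portion of TCB ID */
#define SXG_TCB_ELEMENT_MASK	0x00F	/* Element within bucket */

int main(void)
{
	unsigned int tcb_id = 0x5A7;	/* arbitrary example ID */
	unsigned int bucket  = (tcb_id & SXG_TCB_BUCKET_MASK) >> SXG_TCB_BUCKET_SHIFT;
	unsigned int element = tcb_id & SXG_TCB_ELEMENT_MASK;

	/* 0x5A7 -> bucket 0x5A (90), element 0x7 */
	printf("bucket %u, element %u\n", bucket, element);
	return 0;
}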
#define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6) \ #define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6) \
(((_TcpObject)->VlanId) ? \ (((_TcpObject)->VlanId) ? \
((_IPv6) ? /* Vlan frame header = yes */ \ ((_IPv6) ? /* Vlan frame header = yes */ \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp : \ &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp: \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp) : \ &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp): \
((_IPv6) ? /* Vlan frame header = No */ \ ((_IPv6) ? /* Vlan frame header = No */ \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp : \ &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp : \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.SxgTcp)) &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.SxgTcp))
#define SXG_LOCATE_IP_FRAME_HDR(_TcpObject) \ #define SXG_LOCATE_IP_FRAME_HDR(_TcpObject) \
(_TcpObject)->VlanId ? \ (_TcpObject)->VlanId ? \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip : \ &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip: \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.Ip &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.Ip
#define SXG_LOCATE_IP6_FRAME_HDR(_TcpObject) \ #define SXG_LOCATE_IP6_FRAME_HDR(_TcpObject) \
(_TcpObject)->VlanId ? \ (_TcpObject)->VlanId ? \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \ &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip: \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
#if DBG #if DBG
...@@ -391,16 +393,18 @@ struct sxg_event_ring { ...@@ -391,16 +393,18 @@ struct sxg_event_ring {
* Obviously this is DBG only. Maybe remove later, or #if 0 so we * Obviously this is DBG only. Maybe remove later, or #if 0 so we
* can set it when needed * can set it when needed
*/ */
#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \ #define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \
PIPV6_HDR _Ip6FrameHdr; \ PIPV6_HDR _Ip6FrameHdr; \
if((_TcpObject)->IPv6) { \ if ((_TcpObject)->IPv6) { \
_Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \ _Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \
if(_FastPath) { \ if (_FastPath) { \
_Ip6FrameHdr->HopLimit = (_TcpObject)->Cached.TtlOrHopLimit - 2; \ _Ip6FrameHdr->HopLimit = \
} else { \ (_TcpObject)->Cached.TtlOrHopLimit - 2; \
_Ip6FrameHdr->HopLimit = (_TcpObject)->Cached.TtlOrHopLimit - 1; \ } else { \
} \ _Ip6FrameHdr->HopLimit = \
} \ (_TcpObject)->Cached.TtlOrHopLimit - 1; \
} \
} \
} }
#else #else
/* Do nothing with free build */ /* Do nothing with free build */
...@@ -415,41 +419,47 @@ struct sxg_event_ring { ...@@ -415,41 +419,47 @@ struct sxg_event_ring {
/* Structure and macros to manage a ring */ /* Structure and macros to manage a ring */
struct sxg_ring_info { struct sxg_ring_info {
unsigned char Head; /* Where we add entries - Note unsigned char:RING_SIZE */ /* Where we add entries - Note unsigned char:RING_SIZE */
unsigned char Head;
unsigned char Tail; /* Where we pull off completed entries */ unsigned char Tail; /* Where we pull off completed entries */
	ushort Size;			/* Ring size - Must be a power of 2 */ 	ushort Size;	/* Ring size - Must be a power of 2 */
void * Context[SXG_MAX_RING_SIZE]; /* Shadow ring */ void * Context[SXG_MAX_RING_SIZE]; /* Shadow ring */
}; };
#define SXG_INITIALIZE_RING(_ring, _size) { \ #define SXG_INITIALIZE_RING(_ring, _size) { \
(_ring).Head = 0; \ (_ring).Head = 0; \
(_ring).Tail = 0; \ (_ring).Tail = 0; \
(_ring).Size = (_size); \ (_ring).Size = (_size); \
} }
#define SXG_ADVANCE_INDEX(_index, _size) ((_index) = ((_index) + 1) & ((_size) - 1))
#define SXG_PREVIOUS_INDEX(_index, _size) (((_index) - 1) &((_size) - 1)) #define SXG_ADVANCE_INDEX(_index, _size) \
((_index) = ((_index) + 1) & ((_size) - 1))
#define SXG_PREVIOUS_INDEX(_index, _size) \
(((_index) - 1) &((_size) - 1))
#define SXG_RING_EMPTY(_ring) ((_ring)->Head == (_ring)->Tail) #define SXG_RING_EMPTY(_ring) ((_ring)->Head == (_ring)->Tail)
#define SXG_RING_FULL(_ring) ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail) #define SXG_RING_FULL(_ring) \
#define SXG_RING_ADVANCE_HEAD(_ring) SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size)) ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail)
#define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \ #define SXG_RING_ADVANCE_HEAD(_ring) \
SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size)) SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size))
#define SXG_RING_ADVANCE_TAIL(_ring) { \ #define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \
ASSERT((_ring)->Tail != (_ring)->Head); \ SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size))
SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \ #define SXG_RING_ADVANCE_TAIL(_ring) { \
ASSERT((_ring)->Tail != (_ring)->Head); \
SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \
} }
/* /*
* Set cmd to the next available ring entry, set the shadow context * Set cmd to the next available ring entry, set the shadow context
* entry and advance the ring. * entry and advance the ring.
* The appropriate lock must be held when calling this macro * The appropriate lock must be held when calling this macro
*/ */
#define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \ #define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \
if(SXG_RING_FULL(_ringinfo)) { \ if(SXG_RING_FULL(_ringinfo)) { \
(_cmd) = NULL; \ (_cmd) = NULL; \
} else { \ } else { \
(_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \ (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \
(_ringinfo)->Context[(_ringinfo)->Head] = (void *)(_context);\ (_ringinfo)->Context[(_ringinfo)->Head] = (void *)(_context);\
SXG_RING_ADVANCE_HEAD(_ringinfo); \ SXG_RING_ADVANCE_HEAD(_ringinfo); \
} \ } \
} }
/* /*
...@@ -457,21 +467,21 @@ struct sxg_ring_info { ...@@ -457,21 +467,21 @@ struct sxg_ring_info {
 * NOTE - The appropriate lock MUST NOT BE DROPPED between the SXG_GET_CMD  * NOTE - The appropriate lock MUST NOT BE DROPPED between the SXG_GET_CMD
* and SXG_ABORT_CMD calls. * and SXG_ABORT_CMD calls.
*/ */
#define SXG_ABORT_CMD(_ringinfo) { \ #define SXG_ABORT_CMD(_ringinfo) { \
ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \ ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \
SXG_RING_RETREAT_HEAD(_ringinfo); \ SXG_RING_RETREAT_HEAD(_ringinfo); \
(_ringinfo)->Context[(_ringinfo)->Head] = NULL; \ (_ringinfo)->Context[(_ringinfo)->Head] = NULL; \
} }
/* /*
* For the given ring, return a pointer to the tail cmd and context, * For the given ring, return a pointer to the tail cmd and context,
* clear the context and advance the tail * clear the context and advance the tail
*/ */
#define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \ #define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \
(_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \ (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \
(_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \ (_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \
(_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \ (_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \
SXG_RING_ADVANCE_TAIL(_ringinfo); \ SXG_RING_ADVANCE_TAIL(_ringinfo); \
} }
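The GET/ABORT/RETURN macros above all rest on the masked index arithmetic of SXG_ADVANCE_INDEX, which only wraps correctly when the ring size is a power of two, and which always leaves one slot unused so that full and empty can be told apart. A self-contained illustration of the head/tail behaviour, using simplified stand-ins for the ring macros rather than the driver structures themselves:

#include <stdio.h>

#define SXG_ADVANCE_INDEX(_index, _size)  ((_index) = ((_index) + 1) & ((_size) - 1))
#define RING_EMPTY(h, t)                  ((h) == (t))
#define RING_FULL(h, t, size)             ((((h) + 1) & ((size) - 1)) == (t))

int main(void)
{
	unsigned char head = 0, tail = 0;
	const unsigned short size = 8;		/* must be a power of two */
	int i;

	/* Fill the ring: one slot is always left unused, so a ring of 8
	 * holds at most 7 outstanding entries. */
	for (i = 0; !RING_FULL(head, tail, size); i++)
		SXG_ADVANCE_INDEX(head, size);
	printf("filled %d entries, head=%u tail=%u\n", i, head, tail);

	/* Drain it again by advancing the tail until it catches the head */
	while (!RING_EMPTY(head, tail))
		SXG_ADVANCE_INDEX(tail, size);
	printf("drained, head=%u tail=%u\n", head, tail);
	return 0;
}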
/*************************************************************** /***************************************************************
...@@ -507,7 +517,8 @@ struct sxg_cmd { ...@@ -507,7 +517,8 @@ struct sxg_cmd {
union { union {
u32 Rsvd1; /* TOE NA */ u32 Rsvd1; /* TOE NA */
u32 SgeOffset; /* Slowpath - 2nd SGE offset */ u32 SgeOffset; /* Slowpath - 2nd SGE offset */
u32 Resid; /* MDL completion - clobbers update */ /* MDL completion - clobbers update */
u32 Resid;
}; };
union { union {
u32 TotalLength; /* Total transfer length */ u32 TotalLength; /* Total transfer length */
...@@ -639,10 +650,10 @@ enum sxg_buffer_type { ...@@ -639,10 +650,10 @@ enum sxg_buffer_type {
* Further complicating matters is the fact that the receive * Further complicating matters is the fact that the receive
 * buffers must be variable in length in order to accommodate  * buffers must be variable in length in order to accommodate
* jumbo frame configurations. We configure the buffer * jumbo frame configurations. We configure the buffer
 * length so that the buffer and its corresponding struct sxg_rcv_data_buffer_hdr  * length so that the buffer and its corresponding struct
* structure add up to an even boundary. Then we place the * sxg_rcv_data_buffer_hdr structure add up to an even
* remaining data structures after 128 of them as shown in * boundary. Then we place the remaining data structures after 128
* the following diagram: * of them as shown in the following diagram:
* *
* _________________________________________ * _________________________________________
* | | * | |
...@@ -683,7 +694,8 @@ enum sxg_buffer_type { ...@@ -683,7 +694,8 @@ enum sxg_buffer_type {
*/ */
#define SXG_RCV_DATA_BUFFERS 8192 /* Amount to give to the card */ #define SXG_RCV_DATA_BUFFERS 8192 /* Amount to give to the card */
#define SXG_INITIAL_RCV_DATA_BUFFERS 16384 /* Initial pool of buffers */ #define SXG_INITIAL_RCV_DATA_BUFFERS 16384 /* Initial pool of buffers */
#define SXG_MIN_RCV_DATA_BUFFERS 4096 /* Minimum amount and when to get more */ /* Minimum amount and when to get more */
#define SXG_MIN_RCV_DATA_BUFFERS 4096
#define SXG_MAX_RCV_BLOCKS 256 /* = 32k receive buffers */ #define SXG_MAX_RCV_BLOCKS 256 /* = 32k receive buffers */
/* Receive buffer header */ /* Receive buffer header */
...@@ -699,7 +711,7 @@ struct sxg_rcv_data_buffer_hdr { ...@@ -699,7 +711,7 @@ struct sxg_rcv_data_buffer_hdr {
struct list_entry FreeList; /* Free queue of buffers */ struct list_entry FreeList; /* Free queue of buffers */
unsigned char State; /* See SXG_BUFFER state above */ unsigned char State; /* See SXG_BUFFER state above */
unsigned char Status; /* Event status (to log PUSH) */ unsigned char Status; /* Event status (to log PUSH) */
struct sk_buff * skb; /* Double mapped (nbl and pkt) */ struct sk_buff * skb; /* Double mapped (nbl and pkt)*/
}; };
/* /*
...@@ -708,15 +720,17 @@ struct sxg_rcv_data_buffer_hdr { ...@@ -708,15 +720,17 @@ struct sxg_rcv_data_buffer_hdr {
*/ */
#define SxgDumbRcvPacket skb #define SxgDumbRcvPacket skb
#define SXG_RCV_DATA_HDR_SIZE 256 /* Space for struct sxg_rcv_data_buffer_hdr */ /* Space for struct sxg_rcv_data_buffer_hdr */
#define SXG_RCV_DATA_BUFFER_SIZE 2048 /* Non jumbo = 2k including HDR */ #define SXG_RCV_DATA_HDR_SIZE 256
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 /* jumbo = 10k including HDR */ /* Non jumbo = 2k including HDR */
#define SXG_RCV_DATA_BUFFER_SIZE 2048
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 /* jumbo = 10k including HDR */
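The arithmetic behind these sizes (mirroring the SXG_RCV_BUFFER_DATA_SIZE and SXG_RCV_DATA_BUFFER_HDR_OFFSET macros defined further down): each receive buffer carries its 256-byte header at the end, so the usable frame area is the buffer size minus 256. A small worked example:

#include <stdio.h>

#define SXG_RCV_DATA_HDR_SIZE		256
#define SXG_RCV_DATA_BUFFER_SIZE	2048
#define SXG_RCV_JUMBO_BUFFER_SIZE	10240

/* Same arithmetic as SXG_RCV_BUFFER_DATA_SIZE / SXG_RCV_DATA_BUFFER_HDR_OFFSET */
#define DATA_SIZE(_Buffersize)		((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
#define HDR_OFFSET(_Buffersize)		((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)

int main(void)
{
	/* Non-jumbo: 2048-byte buffer = 1792 bytes of frame data + 256-byte header */
	printf("standard: data %d, header at offset %d\n",
	       DATA_SIZE(SXG_RCV_DATA_BUFFER_SIZE),
	       HDR_OFFSET(SXG_RCV_DATA_BUFFER_SIZE));

	/* Jumbo: 10240-byte buffer = 9984 bytes of frame data + 256-byte header */
	printf("jumbo:    data %d, header at offset %d\n",
	       DATA_SIZE(SXG_RCV_JUMBO_BUFFER_SIZE),
	       HDR_OFFSET(SXG_RCV_JUMBO_BUFFER_SIZE));
	return 0;
}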
/* Receive data descriptor */ /* Receive data descriptor */
struct sxg_rcv_data_descriptor { struct sxg_rcv_data_descriptor {
union { union {
struct sk_buff *VirtualAddress; /* Host handle */ struct sk_buff *VirtualAddress; /* Host handle */
u64 ForceTo8Bytes; /* Force x86 to 8-byte boundary */ u64 ForceTo8Bytes; /*Force x86 to 8-byte boundary*/
}; };
dma_addr_t PhysicalAddress; dma_addr_t PhysicalAddress;
}; };
...@@ -731,32 +745,32 @@ struct sxg_rcv_descriptor_block { ...@@ -731,32 +745,32 @@ struct sxg_rcv_descriptor_block {
/* Receive descriptor block header */ /* Receive descriptor block header */
struct sxg_rcv_descriptor_block_hdr { struct sxg_rcv_descriptor_block_hdr {
void *VirtualAddress; /* start of 2k buffer */ void *VirtualAddress; /* start of 2k buffer */
	dma_addr_t PhysicalAddress; /* ..and its physical address */ 	dma_addr_t PhysicalAddress; /* ..and its physical address */
struct list_entry FreeList; /* free queue of descriptor blocks */ struct list_entry FreeList;/* free queue of descriptor blocks */
unsigned char State; /* see sxg_buffer state above */ unsigned char State; /* see sxg_buffer state above */
}; };
/* Receive block header */ /* Receive block header */
struct sxg_rcv_block_hdr { struct sxg_rcv_block_hdr {
void *VirtualAddress; /* Start of virtual memory */ void *VirtualAddress; /* Start of virtual memory */
	dma_addr_t PhysicalAddress; /* ..and its physical address */ 	dma_addr_t PhysicalAddress; /* ..and its physical address*/
struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS */ struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS*/
}; };
/* Macros to determine data structure offsets into receive block */ /* Macros to determine data structure offsets into receive block */
#define SXG_RCV_BLOCK_SIZE(_Buffersize) \ #define SXG_RCV_BLOCK_SIZE(_Buffersize) \
(((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
(sizeof(struct sxg_rcv_descriptor_block)) + \ (sizeof(struct sxg_rcv_descriptor_block)) + \
(sizeof(struct sxg_rcv_descriptor_block_hdr)) + \ (sizeof(struct sxg_rcv_descriptor_block_hdr)) + \
(sizeof(struct sxg_rcv_block_hdr))) (sizeof(struct sxg_rcv_block_hdr)))
#define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \ #define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \
((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
#define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \ #define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \
((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
#define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \ #define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \
((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) ((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK)
#define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \ #define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \
(((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
(sizeof(struct sxg_rcv_descriptor_block))) (sizeof(struct sxg_rcv_descriptor_block)))
#define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \ #define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \
...@@ -766,12 +780,13 @@ struct sxg_rcv_block_hdr { ...@@ -766,12 +780,13 @@ struct sxg_rcv_block_hdr {
/* Scatter gather list buffer */ /* Scatter gather list buffer */
#define SXG_INITIAL_SGL_BUFFERS 8192 /* Initial pool of SGL buffers */ #define SXG_INITIAL_SGL_BUFFERS 8192 /* Initial pool of SGL buffers */
#define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more */ #define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more*/
#define SXG_MAX_SGL_BUFFERS 16384 /* Maximum to allocate (note ADAPT:ushort) */ /* Maximum to allocate (note ADAPT:ushort) */
#define SXG_MAX_SGL_BUFFERS 16384
/* /*
* SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL buffers. * SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL
* These buffers are allocated out of shared memory and used to * buffers. These buffers are allocated out of shared memory and used to
* contain a physical scatter gather list structure that is shared * contain a physical scatter gather list structure that is shared
* with the card. * with the card.
* *
...@@ -801,39 +816,38 @@ struct sxg_sgl_pool_properties { ...@@ -801,39 +816,38 @@ struct sxg_sgl_pool_properties {
/* /*
* At the moment I'm going to statically initialize 4 pools: * At the moment I'm going to statically initialize 4 pools:
* 100k buffer pool: The vast majority of the expected buffers are expected to * 100k buffer pool: The vast majority of the expected buffers are expected
* be less than or equal to 100k. At 30 entries per and * to be less than or equal to 100k. At 30 entries per and
* 8k initial buffers amounts to ~4MB of memory * 8k initial buffers amounts to ~4MB of memory
* NOTE - This used to be 64K with 20 entries, but during * NOTE - This used to be 64K with 20 entries, but during
* WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their * WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their
* best to send absurd NBL's with ridiculous SGLs, we * best to send absurd NBL's with ridiculous SGLs, we
* have received 400byte sends contained in SGL's that * have received 400byte sends contained in SGL's that
* have 28 entries * have 28 entries
* 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial buffers * 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial
* with 300 entries each => ~2MB of memory * buffers with 300 entries each => ~2MB of memory
* 5M buffer pool: Not expected often, if at all. 32 initial buffers * 5M buffer pool: Not expected often, if at all. 32 initial buffers
* at 1500 entries each => ~1MB of memory * at 1500 entries each => ~1MB of memory
 * 10M buffer pool: Not expected at all, except under pathological conditions.  * 10M buffer pool: Not expected at all, except under pathological conditions.
* Allocate one at initialization time. * Allocate one at initialization time.
* Note - 10M is the current limit of what we can * Note - 10M is the current limit of what we can realistically
* realistically support due to the sahara SGL * support due to the sahara SGL bug described in the
* bug described in the SAHARA SGL WORKAROUND below * SAHARA SGL WORKAROUND below. We will likely adjust the
* * number of pools and/or pool properties over time.
* We will likely adjust the number of pools and/or pool properties over time..
*/ */
#define SXG_NUM_SGL_POOLS 4 #define SXG_NUM_SGL_POOLS 4
#define INITIALIZE_SGL_POOL_PROPERTIES \ #define INITIALIZE_SGL_POOL_PROPERTIES \
struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] = \ struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] =\
{ \ { \
{ 102400, 30, 8192, 2048, 16384, 256}, \ { 102400, 30, 8192, 2048, 16384, 256}, \
{ 1048576, 300, 256, 128, 1024, 16}, \ { 1048576, 300, 256, 128, 1024, 16}, \
{ 5252880, 1500, 32, 16, 512, 0}, \ { 5252880, 1500, 32, 16, 512, 0}, \
{10485760, 2700, 2, 4, 32, 0}, \ {10485760, 2700, 2, 4, 32, 0}, \
}; };
extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; extern struct sxg_sgl_pool_properties SxgSglPoolProperties[];
#define SXG_MAX_SGL_BUFFER_SIZE \ #define SXG_MAX_SGL_BUFFER_SIZE \
SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize
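A hedged sketch of pool selection based on the table above, assuming the first column of INITIALIZE_SGL_POOL_PROPERTIES is NBSize (the largest buffer the pool handles) and that a send simply uses the smallest pool that covers its length; the selection logic itself is an assumption, not code from the driver.

#include <stdio.h>

#define SXG_NUM_SGL_POOLS	4

/* First column of the pool table above, assumed to be NBSize */
static const unsigned int pool_nb_size[SXG_NUM_SGL_POOLS] = {
	102400, 1048576, 5252880, 10485760
};

/* Pick the smallest pool that can hold a buffer of 'len' bytes */
static int sxg_pick_sgl_pool(unsigned int len)
{
	int pool;

	for (pool = 0; pool < SXG_NUM_SGL_POOLS; pool++)
		if (len <= pool_nb_size[pool])
			return pool;
	return -1;			/* larger than the 10M limit */
}

int main(void)
{
	printf("64KB send -> pool %d\n", sxg_pick_sgl_pool(65536));	/* 0 */
	printf("2MB send  -> pool %d\n", sxg_pick_sgl_pool(2 << 20));	/* 2 */
	printf("20MB send -> pool %d\n", sxg_pick_sgl_pool(20 << 20));	/* -1 */
	return 0;
}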
/* /*
...@@ -847,9 +861,9 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; ...@@ -847,9 +861,9 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[];
* We currently workaround this issue by allocating SGL buffers * We currently workaround this issue by allocating SGL buffers
* in 64k blocks and skipping over buffers that straddle the boundary. * in 64k blocks and skipping over buffers that straddle the boundary.
*/ */
#define SXG_INVALID_SGL(_SxgSgl) \ #define SXG_INVALID_SGL(_SxgSgl) \
(((_SxgSgl)->PhysicalAddress.LowPart & 0xFFFF0000) != \ (((_SxgSgl)->PhysicalAddress.LowPart & 0xFFFF0000) != \
(((_SxgSgl)->PhysicalAddress.LowPart + \ (((_SxgSgl)->PhysicalAddress.LowPart + \
SXG_SGL_SIZE((_SxgSgl)->Pool)) & 0xFFFF0000)) SXG_SGL_SIZE((_SxgSgl)->Pool)) & 0xFFFF0000))
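The SXG_INVALID_SGL check above rejects a buffer whose start and end fall in different 64KB regions, i.e. whose addresses differ in bits 31:16. A self-contained illustration of that test on plain 32-bit addresses:

#include <stdio.h>

/* Same idea as SXG_INVALID_SGL: the buffer is unusable if it straddles
 * a 64KB boundary. */
static int straddles_64k(unsigned int addr, unsigned int len)
{
	return (addr & 0xFFFF0000) != ((addr + len) & 0xFFFF0000);
}

int main(void)
{
	/* 0x0001F000 + 0x2000 crosses the 0x00020000 boundary -> invalid */
	printf("%d\n", straddles_64k(0x0001F000, 0x2000));	/* 1 */
	/* 0x00020000 + 0x2000 stays within one 64KB region -> OK */
	printf("%d\n", straddles_64k(0x00020000, 0x2000));	/* 0 */
	return 0;
}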
/* /*
...@@ -858,17 +872,19 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; ...@@ -858,17 +872,19 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[];
* struct sxg_sgl_block_hdr, plus one for padding * struct sxg_sgl_block_hdr, plus one for padding
*/ */
#define SXG_SGL_BLOCK_SIZE 65536 #define SXG_SGL_BLOCK_SIZE 65536
#define SXG_SGL_ALLOCATION_SIZE(_Pool) SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool) #define SXG_SGL_ALLOCATION_SIZE(_Pool) \
SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool)
struct sxg_sgl_block_hdr { struct sxg_sgl_block_hdr {
ushort Pool; /* Associated SGL pool */ ushort Pool; /* Associated SGL pool */
struct list_entry List; /* struct sxg_scatter_gather blocks */ /* struct sxg_scatter_gather blocks */
dma64_addr_t PhysicalAddress;/* physical address */ struct list_entry List;
dma64_addr_t PhysicalAddress;/* physical address */
}; };
/* /*
* The following definition denotes the maximum block of memory that the * The following definition denotes the maximum block of memory that the
* card can DMA to. It is specified in the call to NdisMRegisterScatterGatherDma. * card can DMA to.It is specified in the call to NdisMRegisterScatterGatherDma.
* For now, use the same value as used in the Slic/Oasis driver, which * For now, use the same value as used in the Slic/Oasis driver, which
* is 128M. That should cover any expected MDL that I can think of. * is 128M. That should cover any expected MDL that I can think of.
*/ */
...@@ -876,9 +892,9 @@ struct sxg_sgl_block_hdr { ...@@ -876,9 +892,9 @@ struct sxg_sgl_block_hdr {
/* Self identifying structure type */ /* Self identifying structure type */
enum SXG_SGL_TYPE { enum SXG_SGL_TYPE {
SXG_SGL_DUMB, /* Dumb NIC SGL */ SXG_SGL_DUMB, /* Dumb NIC SGL */
SXG_SGL_SLOW, /* Slowpath protocol header - see below */ SXG_SGL_SLOW, /* Slowpath protocol header - see below */
SXG_SGL_CHIMNEY /* Chimney offload SGL */ SXG_SGL_CHIMNEY /* Chimney offload SGL */
}; };
/* /*
...@@ -912,13 +928,16 @@ struct sxg_scatter_gather { ...@@ -912,13 +928,16 @@ struct sxg_scatter_gather {
ushort Pool; /* Associated SGL pool */ ushort Pool; /* Associated SGL pool */
ushort Entries; /* SGL total entries */ ushort Entries; /* SGL total entries */
void * adapter; /* Back pointer to adapter */ void * adapter; /* Back pointer to adapter */
struct list_entry FreeList; /* Free struct sxg_scatter_gather blocks */ /* Free struct sxg_scatter_gather blocks */
struct list_entry AllList; /* All struct sxg_scatter_gather blocks */ struct list_entry FreeList;
/* All struct sxg_scatter_gather blocks */
struct list_entry AllList;
dma_addr_t PhysicalAddress;/* physical address */ dma_addr_t PhysicalAddress;/* physical address */
unsigned char State; /* See SXG_BUFFER state above */ unsigned char State; /* See SXG_BUFFER state above */
unsigned char CmdIndex; /* Command ring index */ unsigned char CmdIndex; /* Command ring index */
struct sk_buff *DumbPacket; /* Associated Packet */ struct sk_buff *DumbPacket; /* Associated Packet */
u32 Direction; /* For asynchronous completions */ /* For asynchronous completions */
u32 Direction;
u32 CurOffset; /* Current SGL offset */ u32 CurOffset; /* Current SGL offset */
u32 SglRef; /* SGL reference count */ u32 SglRef; /* SGL reference count */
struct vlan_hdr VlanTag; /* VLAN tag to be inserted into SGL */ struct vlan_hdr VlanTag; /* VLAN tag to be inserted into SGL */
...@@ -926,7 +945,10 @@ struct sxg_scatter_gather { ...@@ -926,7 +945,10 @@ struct sxg_scatter_gather {
struct sxg_x64_sgl Sgl; /* SGL handed to card */ struct sxg_x64_sgl Sgl; /* SGL handed to card */
}; };
/* Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl includes 1 SGE.. */ /*
* Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl
* includes 1 SGE..
*/
#define SXG_SGL_SIZE(_Pool) \ #define SXG_SGL_SIZE(_Pool) \
(sizeof(struct sxg_scatter_gather) + \ (sizeof(struct sxg_scatter_gather) + \
((SxgSglPoolProperties[_Pool].SGEntries - 1) * \ ((SxgSglPoolProperties[_Pool].SGEntries - 1) * \
...@@ -934,7 +956,8 @@ struct sxg_scatter_gather { ...@@ -934,7 +956,8 @@ struct sxg_scatter_gather {
#if defined(CONFIG_X86_64) #if defined(CONFIG_X86_64)
#define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl) #define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl)
#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * sizeof(struct sxg_x64_sge)) #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * \
sizeof(struct sxg_x64_sge))
#define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl) #define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl)
#elif defined(CONFIG_X86) #elif defined(CONFIG_X86)
/* Force NDIS to give us its own buffer so we can reformat to our own */ /* Force NDIS to give us its own buffer so we can reformat to our own */
...@@ -952,7 +975,8 @@ struct sxg_ucode_stats { ...@@ -952,7 +975,8 @@ struct sxg_ucode_stats {
u32 ERDrops; /* Rcv drops due to ER full */ u32 ERDrops; /* Rcv drops due to ER full */
u32 NBDrops; /* Rcv drops due to out of host buffers */ u32 NBDrops; /* Rcv drops due to out of host buffers */
u32 PQDrops; /* Rcv drops due to PDQ full */ u32 PQDrops; /* Rcv drops due to PDQ full */
u32 BFDrops; /* Rcv drops due to bad frame: no link addr match, frlen > max */ /* Rcv drops due to bad frame: no link addr match, frlen > max */
u32 BFDrops;
u32 UPDrops; /* Rcv drops due to UPFq full */ u32 UPDrops; /* Rcv drops due to UPFq full */
u32 XNoBufs; /* Xmt drop due to no DRAM Xmit buffer or PxyBuf */ u32 XNoBufs; /* Xmt drop due to no DRAM Xmit buffer or PxyBuf */
}; };
......
...@@ -33,14 +33,18 @@ ...@@ -33,14 +33,18 @@
#define SSID_FUNC_MASK 0xF000 /* Subsystem function mask */ #define SSID_FUNC_MASK 0xF000 /* Subsystem function mask */
/* Base SSID's */ /* Base SSID's */
#define SSID_SAHARA_PROTO 0x0018 /* 100022 Sahara prototype (XenPak) board */ /* 100022 Sahara prototype (XenPak) board */
#define SSID_SAHARA_PROTO 0x0018
#define SSID_SAHARA_FIBER 0x0019 /* 100023 Sahara 1-port fiber board */ #define SSID_SAHARA_FIBER 0x0019 /* 100023 Sahara 1-port fiber board */
#define SSID_SAHARA_COPPER 0x001A /* 100024 Sahara 1-port copper board */ #define SSID_SAHARA_COPPER 0x001A /* 100024 Sahara 1-port copper board */
/* Useful SSID macros */ /* Useful SSID macros */
#define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) /* isolate base SSID bits */ /* isolate base SSID bits */
#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) /* isolate SSID OEM bits */ #define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK)
#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) /* isolate SSID function bits */ /* isolate SSID OEM bits */
#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK)
/* isolate SSID function bits */
#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK)
/* HW Register Space */ /* HW Register Space */
...@@ -48,34 +52,34 @@ ...@@ -48,34 +52,34 @@
#pragma pack(push, 1) #pragma pack(push, 1)
struct sxg_hw_regs { struct sxg_hw_regs {
u32 Reset; /* Write 0xdead to invoke soft reset */ u32 Reset; /* Write 0xdead to invoke soft reset */
u32 Pad1; /* No register defined at offset 4 */ u32 Pad1; /* No register defined at offset 4 */
u32 InterruptMask0; /* Deassert legacy interrupt on function 0 */ u32 InterruptMask0; /* Deassert legacy interrupt on function 0 */
u32 InterruptMask1; /* Deassert legacy interrupt on function 1 */ u32 InterruptMask1; /* Deassert legacy interrupt on function 1 */
u32 UcodeDataLow; /* Store microcode instruction bits 31-0 */ u32 UcodeDataLow; /* Store microcode instruction bits 31-0 */
u32 UcodeDataMiddle; /* Store microcode instruction bits 63-32 */ u32 UcodeDataMiddle; /* Store microcode instruction bits 63-32 */
u32 UcodeDataHigh; /* Store microcode instruction bits 95-64 */ u32 UcodeDataHigh; /* Store microcode instruction bits 95-64 */
u32 UcodeAddr; /* Store microcode address - See flags below */ u32 UcodeAddr; /* Store microcode address - See flags below */
u32 PadTo0x80[24]; /* Pad to Xcv configuration registers */ u32 PadTo0x80[24]; /* Pad to Xcv configuration registers */
u32 MacConfig0; /* 0x80 - AXGMAC Configuration Register 0 */ u32 MacConfig0; /* 0x80 - AXGMAC Configuration Register 0 */
u32 MacConfig1; /* 0x84 - AXGMAC Configuration Register 1 */ u32 MacConfig1; /* 0x84 - AXGMAC Configuration Register 1 */
u32 MacConfig2; /* 0x88 - AXGMAC Configuration Register 2 */ u32 MacConfig2; /* 0x88 - AXGMAC Configuration Register 2 */
u32 MacConfig3; /* 0x8C - AXGMAC Configuration Register 3 */ u32 MacConfig3; /* 0x8C - AXGMAC Configuration Register 3 */
u32 MacAddressLow; /* 0x90 - AXGMAC MAC Station Address - octets 1-4 */ u32 MacAddressLow; /* 0x90 - AXGMAC MAC Station Address - octets 1-4 */
u32 MacAddressHigh; /* 0x94 - AXGMAC MAC Station Address - octets 5-6 */ u32 MacAddressHigh; /* 0x94 - AXGMAC MAC Station Address - octets 5-6 */
u32 MacReserved1[2]; /* 0x98 - AXGMAC Reserved */ u32 MacReserved1[2]; /* 0x98 - AXGMAC Reserved */
u32 MacMaxFrameLen; /* 0xA0 - AXGMAC Maximum Frame Length */ u32 MacMaxFrameLen; /* 0xA0 - AXGMAC Maximum Frame Length */
u32 MacReserved2[2]; /* 0xA4 - AXGMAC Reserved */ u32 MacReserved2[2]; /* 0xA4 - AXGMAC Reserved */
u32 MacRevision; /* 0xAC - AXGMAC Revision Level Register */ u32 MacRevision; /* 0xAC - AXGMAC Revision Level Register */
u32 MacReserved3[4]; /* 0xB0 - AXGMAC Reserved */ u32 MacReserved3[4]; /* 0xB0 - AXGMAC Reserved */
u32 MacAmiimCmd; /* 0xC0 - AXGMAC AMIIM Command Register */ u32 MacAmiimCmd; /* 0xC0 - AXGMAC AMIIM Command Register */
u32 MacAmiimField; /* 0xC4 - AXGMAC AMIIM Field Register */ u32 MacAmiimField; /* 0xC4 - AXGMAC AMIIM Field Register */
u32 MacAmiimConfig; /* 0xC8 - AXGMAC AMIIM Configuration Register */ u32 MacAmiimConfig; /* 0xC8 - AXGMAC AMIIM Configuration Register */
u32 MacAmiimLink; /* 0xCC - AXGMAC AMIIM Link Fail Vector Register */ u32 MacAmiimLink; /* 0xCC - AXGMAC AMIIM Link Fail Vector Register */
	u32 MacAmiimIndicator; /* 0xD0 - AXGMAC AMIIM Indicator Register */ 	u32 MacAmiimIndicator; /* 0xD0 - AXGMAC AMIIM Indicator Register */
u32 PadTo0x100[11]; /* 0xD4 - 0x100 - Pad */ u32 PadTo0x100[11]; /* 0xD4 - 0x100 - Pad */
u32 XmtConfig; /* 0x100 - Transmit Configuration Register */ u32 XmtConfig; /* 0x100 - Transmit Configuration Register */
u32 RcvConfig; /* 0x104 - Receive Configuration Register 1 */ u32 RcvConfig; /* 0x104 - Receive Configuration Register 1 */
u32 LinkAddress0Low; /* 0x108 - Link address 0 */ u32 LinkAddress0Low; /* 0x108 - Link address 0 */
u32 LinkAddress0High; /* 0x10C - Link address 0 */ u32 LinkAddress0High; /* 0x10C - Link address 0 */
u32 LinkAddress1Low; /* 0x110 - Link address 1 */ u32 LinkAddress1Low; /* 0x110 - Link address 1 */
...@@ -90,10 +94,10 @@ struct sxg_hw_regs { ...@@ -90,10 +94,10 @@ struct sxg_hw_regs {
u32 ClearStats; /* 0x17C - Clear Stats */ u32 ClearStats; /* 0x17C - Clear Stats */
u32 XmtErrorsLow; /* 0x180 - Transmit stats - errors */ u32 XmtErrorsLow; /* 0x180 - Transmit stats - errors */
u32 XmtErrorsHigh; /* 0x184 - Transmit stats - errors */ u32 XmtErrorsHigh; /* 0x184 - Transmit stats - errors */
u32 XmtFramesLow; /* 0x188 - Transmit stats - frame count */ u32 XmtFramesLow; /* 0x188 - Transmit stats - frame count */
u32 XmtFramesHigh; /* 0x18C - Transmit stats - frame count */ u32 XmtFramesHigh; /* 0x18C - Transmit stats - frame count */
u32 XmtBytesLow; /* 0x190 - Transmit stats - byte count */ u32 XmtBytesLow; /* 0x190 - Transmit stats - byte count */
u32 XmtBytesHigh; /* 0x194 - Transmit stats - byte count */ u32 XmtBytesHigh; /* 0x194 - Transmit stats - byte count */
u32 XmtTcpSegmentsLow; /* 0x198 - Transmit stats - TCP segments */ u32 XmtTcpSegmentsLow; /* 0x198 - Transmit stats - TCP segments */
u32 XmtTcpSegmentsHigh; /* 0x19C - Transmit stats - TCP segments */ u32 XmtTcpSegmentsHigh; /* 0x19C - Transmit stats - TCP segments */
u32 XmtTcpBytesLow; /* 0x1A0 - Transmit stats - TCP bytes */ u32 XmtTcpBytesLow; /* 0x1A0 - Transmit stats - TCP bytes */
...@@ -119,11 +123,12 @@ struct sxg_hw_regs { ...@@ -119,11 +123,12 @@ struct sxg_hw_regs {
#define MICROCODE_ADDRESS_GO 0x80000000 /* Start microcode */ #define MICROCODE_ADDRESS_GO 0x80000000 /* Start microcode */
#define MICROCODE_ADDRESS_WRITE 0x40000000 /* Store microcode */ #define MICROCODE_ADDRESS_WRITE 0x40000000 /* Store microcode */
#define MICROCODE_ADDRESS_READ 0x20000000 /* Read microcode */ #define MICROCODE_ADDRESS_READ 0x20000000 /* Read microcode */
#define MICROCODE_ADDRESS_PARITY 0x10000000 /* Parity error detected */ #define MICROCODE_ADDRESS_PARITY 0x10000000/* Parity error detected */
#define MICROCODE_ADDRESS_MASK 0x00001FFF /* Address bits */ #define MICROCODE_ADDRESS_MASK 0x00001FFF /* Address bits */
/* Link Address Registers */ /* Link Address Registers */
#define LINK_ADDRESS_ENABLE 0x80000000 /* Applied to link address high */ /* Applied to link address high */
#define LINK_ADDRESS_ENABLE 0x80000000
/* Microsoft register space size */ /* Microsoft register space size */
#define SXG_UCODEREG_MEMSIZE 0x40000 /* 256k */ #define SXG_UCODEREG_MEMSIZE 0x40000 /* 256k */
...@@ -135,110 +140,153 @@ struct sxg_hw_regs { ...@@ -135,110 +140,153 @@ struct sxg_hw_regs {
*/ */
#define SXG_ADDRESS_CODE_SHIFT 2 /* Base command code */ #define SXG_ADDRESS_CODE_SHIFT 2 /* Base command code */
#define SXG_ADDRESS_CODE_MASK 0x0000003C #define SXG_ADDRESS_CODE_MASK 0x0000003C
#define SXG_ADDRESS_EXCODE_SHIFT 6 /* Extended (or sub) command code */ /* Extended (or sub) command code */
#define SXG_ADDRESS_EXCODE_SHIFT 6
#define SXG_ADDRESS_EXCODE_MASK 0x00001FC0 #define SXG_ADDRESS_EXCODE_MASK 0x00001FC0
#define SXG_ADDRESS_CPUID_SHIFT 13 /* CPU */ #define SXG_ADDRESS_CPUID_SHIFT 13 /* CPU */
#define SXG_ADDRESS_CPUID_MASK 0x0003E000 #define SXG_ADDRESS_CPUID_MASK 0x0003E000
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 /* Used to sanity check UCODE_REGS structure */ /* Used to sanity check UCODE_REGS structure */
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000
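Note that SXG_REGISTER_SIZE_PER_CPU (0x2000) matches the CPUID field starting at bit 13. A sketch of composing a per-CPU microcode register offset from these fields; the exact way the driver builds offsets is an assumption based only on the shift/mask layout above.

#include <stdio.h>

#define SXG_ADDRESS_CODE_SHIFT		2
#define SXG_ADDRESS_CODE_MASK		0x0000003C
#define SXG_ADDRESS_EXCODE_SHIFT	6
#define SXG_ADDRESS_EXCODE_MASK		0x00001FC0
#define SXG_ADDRESS_CPUID_SHIFT		13
#define SXG_ADDRESS_CPUID_MASK		0x0003E000

/* Compose an offset from CPU number, extended code and base code */
static unsigned int sxg_ucode_reg_offset(unsigned int cpu,
					 unsigned int excode,
					 unsigned int code)
{
	return ((cpu    << SXG_ADDRESS_CPUID_SHIFT)  & SXG_ADDRESS_CPUID_MASK)  |
	       ((excode << SXG_ADDRESS_EXCODE_SHIFT) & SXG_ADDRESS_EXCODE_MASK) |
	       ((code   << SXG_ADDRESS_CODE_SHIFT)   & SXG_ADDRESS_CODE_MASK);
}

int main(void)
{
	/* CPU 3, extended code 5, base code 1 -> 0x6144 */
	printf("offset 0x%x\n", sxg_ucode_reg_offset(3, 5, 1));
	return 0;
}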
/* Sahara receive sequencer status values */ /* Sahara receive sequencer status values */
#define SXG_RCV_STATUS_ATTN 0x80000000 /* Attention */ #define SXG_RCV_STATUS_ATTN 0x80000000 /* Attention */
#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 /* Transport mask */ #define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 /* Transport mask */
#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 /* Transport error */ #define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 /* Transport error */
#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 /* Transport cksum error */ /* Transport cksum error */
#define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 /* Transport underflow */ #define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000
#define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 /* Transport header length */ /* Transport underflow */
#define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 /* Transport flags detected */ #define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000
#define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 /* Transport options detected */ /* Transport header length */
#define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 /* Transport DDP */ #define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000
#define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 /* Transport DDP */ /* Transport flags detected */
#define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 /* Transport iSCSI */ #define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000
#define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 /* Transport NFS */ /* Transport options detected */
#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 /* Transport FTP */ #define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000
#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 /* Transport HTTP */ #define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 /* Transport DDP */
#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 /* Transport SMB */ #define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 /* Transport DDP */
#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 /* Network mask */ #define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 /* Transport iSCSI */
#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 /* Network error */ #define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 /* Transport NFS */
#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 /* Network cksum error */ #define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 /* Transport FTP */
#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 /* Network underflow error */ #define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 /* Transport HTTP */
#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 /* Network header length */ #define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 /* Transport SMB */
#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 /* Network overflow detected */ #define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 /* Network mask */
#define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 /* Network multicast detected */ #define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 /* Network error */
#define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 /* Network options detected */ /* Network cksum error */
#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 /* Network offset detected */ #define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000
#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 /* Network fragment detected */ /* Network underflow error */
#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 /* Network transport type mask */ #define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000
#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 /* UDP */ /* Network header length */
#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 /* TCP */ #define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000
#define SXG_RCV_STATUS_IPONLY 0x00008000 /* IP-only not TCP */ /* Network overflow detected */
#define SXG_RCV_STATUS_PKT_PRI 0x00006000 /* Receive priority */ #define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000
#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 /* Receive priority shift */ /* Network multicast detected */
#define SXG_RCV_STATUS_PARITY 0x00001000 /* MAC Receive RAM parity error */ #define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000
#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 /* Link address detection mask */ /* Network options detected */
#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 /* Link address D */ #define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000
#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 /* Link address C */ /* Network offset detected */
#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 /* Link address B */ #define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000
#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 /* Link address A */ /* Network fragment detected */
#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 /* Link address broadcast */ #define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 /* Link address multicast */ /* Network transport type mask */
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 /* Link control multicast */ #define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF /* Link status mask */ #define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 /* UDP */
#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 /* Link error */ #define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 /* TCP */
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF /* Link status mask */ #define SXG_RCV_STATUS_IPONLY 0x00008000 /* IP-only not TCP */
#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 /* RcvMacQ parity error */ /* Receive priority */
#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 /* Data early */ #define SXG_RCV_STATUS_PKT_PRI 0x00006000
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 /* Buffer overflow */ /* Receive priority shift */
#define SXG_RCV_STATUS_LINK_CODE 0x00000084 /* Link code error */ #define SXG_RCV_STATUS_PKT_PRI_SHFT 13
#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 /* Dribble nibble */ /* MAC Receive RAM parity error */
#define SXG_RCV_STATUS_LINK_CRC 0x00000082 /* CRC error */ #define SXG_RCV_STATUS_PARITY 0x00001000
#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 /* Link overflow */ /* Link address detection mask */
#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 /* Link underflow */ #define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00
#define SXG_RCV_STATUS_LINK_8023 0x00000020 /* 802.3 */
#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 /* Snap */ #define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 /* Link address D */
#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 /* VLAN */ #define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 /* Link address C */
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 /* Network type mask */ #define SXG_RCV_STATUS_ADDRESS_B 0x00000900 /* Link address B */
#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 /* Control packet */ #define SXG_RCV_STATUS_ADDRESS_A 0x00000800 /* Link address A */
#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 /* IPv6 packet */ /* Link address broadcast */
#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 /* IPv4 packet */ #define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300
/* Link address multicast */
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200
/* Link control multicast */
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100
/* Link status mask */
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF
#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 /* Link error */
/* Link status mask */
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF
/* RcvMacQ parity error */
#define SXG_RCV_STATUS_LINK_PARITY 0x00000087
#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 /* Data early */
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 /* Buffer overflow */
#define SXG_RCV_STATUS_LINK_CODE 0x00000084 /* Link code error */
#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 /* Dribble nibble */
#define SXG_RCV_STATUS_LINK_CRC 0x00000082 /* CRC error */
#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 /* Link overflow */
#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 /* Link underflow */
#define SXG_RCV_STATUS_LINK_8023 0x00000020 /* 802.3 */
#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 /* Snap */
#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 /* VLAN */
/* Network type mask */
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007
#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 /* Control packet */
#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 /* IPv6 packet */
#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 /* IPv4 packet */
/* Sahara receive and transmit configuration registers */ /* Sahara receive and transmit configuration registers */
#define RCV_CONFIG_RESET 0x80000000 /* RcvConfig register reset */ /* RcvConfig register reset */
#define RCV_CONFIG_ENABLE 0x40000000 /* Enable the receive logic */ #define RCV_CONFIG_RESET 0x80000000
#define RCV_CONFIG_ENPARSE 0x20000000 /* Enable the receive parser */ /* Enable the receive logic */
#define RCV_CONFIG_SOCKET 0x10000000 /* Enable the socket detector */ #define RCV_CONFIG_ENABLE 0x40000000
#define RCV_CONFIG_RCVBAD 0x08000000 /* Receive all bad frames */ /* Enable the receive parser */
#define RCV_CONFIG_CONTROL 0x04000000 /* Receive all control frames */ #define RCV_CONFIG_ENPARSE 0x20000000
#define RCV_CONFIG_RCVPAUSE 0x02000000 /* Enable pause transmit when attn */ /* Enable the socket detector */
#define RCV_CONFIG_TZIPV6 0x01000000 /* Include TCP port w/ IPv6 toeplitz */ #define RCV_CONFIG_SOCKET 0x10000000
#define RCV_CONFIG_TZIPV4 0x00800000 /* Include TCP port w/ IPv4 toeplitz */ #define RCV_CONFIG_RCVBAD 0x08000000 /* Receive all bad frames */
#define RCV_CONFIG_FLUSH 0x00400000 /* Flush buffers */ /* Receive all control frames */
#define RCV_CONFIG_PRIORITY_MASK 0x00300000 /* Priority level */ #define RCV_CONFIG_CONTROL 0x04000000
#define RCV_CONFIG_CONN_MASK 0x000C0000 /* Number of connections */ /* Enable pause transmit when attn */
#define RCV_CONFIG_CONN_4K 0x00000000 /* 4k connections */ #define RCV_CONFIG_RCVPAUSE 0x02000000
#define RCV_CONFIG_CONN_2K 0x00040000 /* 2k connections */ /* Include TCP port w/ IPv6 toeplitz */
#define RCV_CONFIG_CONN_1K 0x00080000 /* 1k connections */ #define RCV_CONFIG_TZIPV6 0x01000000
#define RCV_CONFIG_CONN_512 0x000C0000 /* 512 connections */ /* Include TCP port w/ IPv4 toeplitz */
#define RCV_CONFIG_HASH_MASK 0x00030000 /* Hash depth */ #define RCV_CONFIG_TZIPV4 0x00800000
#define RCV_CONFIG_HASH_8 0x00000000 /* Hash depth 8 */ #define RCV_CONFIG_FLUSH 0x00400000 /* Flush buffers */
#define RCV_CONFIG_HASH_16 0x00010000 /* Hash depth 16 */ #define RCV_CONFIG_PRIORITY_MASK 0x00300000 /* Priority level */
#define RCV_CONFIG_HASH_4 0x00020000 /* Hash depth 4 */ #define RCV_CONFIG_CONN_MASK 0x000C0000 /* Number of connections */
#define RCV_CONFIG_HASH_2 0x00030000 /* Hash depth 2 */ #define RCV_CONFIG_CONN_4K 0x00000000 /* 4k connections */
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 /* Buffer length bits 15:4. ie multiple of 16. */ #define RCV_CONFIG_CONN_2K 0x00040000 /* 2k connections */
#define RCV_CONFIG_SKT_DIS 0x00000008 /* Disable socket detection on attn */ #define RCV_CONFIG_CONN_1K 0x00080000 /* 1k connections */
#define RCV_CONFIG_CONN_512 0x000C0000 /* 512 connections */
#define RCV_CONFIG_HASH_MASK 0x00030000 /* Hash depth */
#define RCV_CONFIG_HASH_8 0x00000000 /* Hash depth 8 */
#define RCV_CONFIG_HASH_16 0x00010000 /* Hash depth 16 */
#define RCV_CONFIG_HASH_4 0x00020000 /* Hash depth 4 */
#define RCV_CONFIG_HASH_2 0x00030000 /* Hash depth 2 */
/* Buffer length bits 15:4. ie multiple of 16. */
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0
/* Disable socket detection on attn */
#define RCV_CONFIG_SKT_DIS 0x00000008
/* /*
* Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size. * Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
* We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC, * We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
* and round up to nearest 16 byte boundary * and round up to nearest 16 byte boundary
*/ */
#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK) #define RCV_CONFIG_BUFSIZE(_MaxFrame) \
((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
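Two worked values for the macro above, using the 18 status/padding bytes plus 4 CRC bytes and the round-up to 16 described in the comment:

#include <stdio.h>

#define RCV_CONFIG_BUFLEN_MASK	0x0000FFF0
#define RCV_CONFIG_BUFSIZE(_MaxFrame) \
	((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)

int main(void)
{
	/* Standard 1514-byte frame: 1514 + 22 = 1536, already a multiple
	 * of 16, so the buffer length field is 1536. */
	printf("1514 -> %d\n", RCV_CONFIG_BUFSIZE(1514));	/* 1536 */

	/* 9014-byte jumbo frame: 9014 + 22 = 9036, rounded up to 9040. */
	printf("9014 -> %d\n", RCV_CONFIG_BUFSIZE(9014));	/* 9040 */
	return 0;
}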
#define XMT_CONFIG_RESET 0x80000000 /* XmtConfig register reset */
#define XMT_CONFIG_ENABLE 0x40000000 /* Enable transmit logic */ /* XmtConfig register reset */
#define XMT_CONFIG_MAC_PARITY 0x20000000 /* Inhibit MAC RAM parity error */ #define XMT_CONFIG_RESET 0x80000000
#define XMT_CONFIG_BUF_PARITY 0x10000000 /* Inhibit D2F buffer parity error */ #define XMT_CONFIG_ENABLE 0x40000000 /* Enable transmit logic */
#define XMT_CONFIG_MEM_PARITY 0x08000000 /* Inhibit 1T SRAM parity error */ /* Inhibit MAC RAM parity error */
#define XMT_CONFIG_INVERT_PARITY 0x04000000 /* Invert MAC RAM parity */ #define XMT_CONFIG_MAC_PARITY 0x20000000
#define XMT_CONFIG_INITIAL_IPID 0x0000FFFF /* Initial IPID */ /* Inhibit D2F buffer parity error */
#define XMT_CONFIG_BUF_PARITY 0x10000000
/* Inhibit 1T SRAM parity error */
#define XMT_CONFIG_MEM_PARITY 0x08000000
#define XMT_CONFIG_INVERT_PARITY 0x04000000 /* Invert MAC RAM parity */
#define XMT_CONFIG_INITIAL_IPID 0x0000FFFF /* Initial IPID */
/* /*
* A-XGMAC Registers - Occupy 0x80 - 0xD4 of the struct sxg_hw_regs * A-XGMAC Registers - Occupy 0x80 - 0xD4 of the struct sxg_hw_regs
...@@ -246,68 +294,93 @@ struct sxg_hw_regs { ...@@ -246,68 +294,93 @@ struct sxg_hw_regs {
* Full register descriptions can be found in axgmac.pdf * Full register descriptions can be found in axgmac.pdf
*/ */
/* A-XGMAC Configuration Register 0 */ /* A-XGMAC Configuration Register 0 */
#define AXGMAC_CFG0_SUB_RESET 0x80000000 /* Sub module reset */ #define AXGMAC_CFG0_SUB_RESET 0x80000000 /* Sub module reset */
#define AXGMAC_CFG0_RCNTRL_RESET 0x00400000 /* Receive control reset */ #define AXGMAC_CFG0_RCNTRL_RESET 0x00400000 /* Receive control reset */
#define AXGMAC_CFG0_RFUNC_RESET 0x00200000 /* Receive function reset */ #define AXGMAC_CFG0_RFUNC_RESET 0x00200000 /* Receive function reset */
#define AXGMAC_CFG0_TCNTRL_RESET 0x00040000 /* Transmit control reset */ #define AXGMAC_CFG0_TCNTRL_RESET 0x00040000 /* Transmit control reset */
#define AXGMAC_CFG0_TFUNC_RESET 0x00020000 /* Transmit function reset */ #define AXGMAC_CFG0_TFUNC_RESET 0x00020000 /* Transmit function reset */
#define AXGMAC_CFG0_MII_RESET 0x00010000 /* MII Management reset */ #define AXGMAC_CFG0_MII_RESET 0x00010000 /* MII Management reset */
/* A-XGMAC Configuration Register 1 */ /* A-XGMAC Configuration Register 1 */
#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 /* Allow the sending of Pause frames */ /* Allow the sending of Pause frames */
#define AXGMAC_CFG1_XMT_EN 0x40000000 /* Enable transmit */ #define AXGMAC_CFG1_XMT_PAUSE 0x80000000
#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 /* Allow the detection of Pause frames */ #define AXGMAC_CFG1_XMT_EN 0x40000000 /* Enable transmit */
#define AXGMAC_CFG1_RCV_EN 0x10000000 /* Enable receive */ /* Allow the detection of Pause frames */
#define AXGMAC_CFG1_XMT_STATE 0x04000000 /* Current transmit state - READ ONLY */ #define AXGMAC_CFG1_RCV_PAUSE 0x20000000
#define AXGMAC_CFG1_RCV_STATE 0x01000000 /* Current receive state - READ ONLY */ #define AXGMAC_CFG1_RCV_EN 0x10000000 /* Enable receive */
#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 /* Only pause for 64 slot on XOFF */ /* Current transmit state - READ ONLY */
#define AXGMAC_CFG1_XMG_FCS1 0x00000400 /* Delay transmit FCS 1 4-byte word */ #define AXGMAC_CFG1_XMT_STATE 0x04000000
#define AXGMAC_CFG1_XMG_FCS2 0x00000800 /* Delay transmit FCS 2 4-byte words */ /* Current receive state - READ ONLY */
#define AXGMAC_CFG1_XMG_FCS3 0x00000C00 /* Delay transmit FCS 3 4-byte words */ #define AXGMAC_CFG1_RCV_STATE 0x01000000
#define AXGMAC_CFG1_RCV_FCS1 0x00000100 /* Delay receive FCS 1 4-byte word */ /* Only pause for 64 slot on XOFF */
#define AXGMAC_CFG1_RCV_FCS2 0x00000200 /* Delay receive FCS 2 4-byte words */ #define AXGMAC_CFG1_XOFF_SHORT 0x00001000
#define AXGMAC_CFG1_RCV_FCS3 0x00000300 /* Delay receive FCS 3 4-byte words */ /* Delay transmit FCS 1 4-byte word */
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 /* Per-packet override enable */ #define AXGMAC_CFG1_XMG_FCS1 0x00000400
#define AXGMAC_CFG1_SWAP 0x00000040 /* Byte swap enable */ /* Delay transmit FCS 2 4-byte words */
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 /* ASSERT srdrpfrm on short frame (<64) */ #define AXGMAC_CFG1_XMG_FCS2 0x00000800
#define AXGMAC_CFG1_RCV_STRICT 0x00000010 /* RCV only 802.3AE when CLEAR */ /* Delay transmit FCS 3 4-byte words */
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 /* Verify frame length */ #define AXGMAC_CFG1_XMG_FCS3 0x00000C00
#define AXGMAC_CFG1_GEN_FCS 0x00000004 /* Generate FCS */ /* Delay receive FCS 1 4-byte word */
#define AXGMAC_CFG1_PAD_MASK 0x00000003 /* Mask for pad bits */ #define AXGMAC_CFG1_RCV_FCS1 0x00000100
#define AXGMAC_CFG1_PAD_64 0x00000001 /* Pad frames to 64 bytes */ /* Delay receive FCS 2 4-byte words */
#define AXGMAC_CFG1_PAD_VLAN 0x00000002 /* Detect VLAN and pad to 68 bytes */ #define AXGMAC_CFG1_RCV_FCS2 0x00000200
#define AXGMAC_CFG1_PAD_68 0x00000003 /* Pad to 68 bytes */ /* Delay receive FCS 3 4-byte words */
#define AXGMAC_CFG1_RCV_FCS3 0x00000300
/* Per-packet override enable */
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080
#define AXGMAC_CFG1_SWAP 0x00000040 /* Byte swap enable */
/* ASSERT srdrpfrm on short frame (<64) */
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020
/* RCV only 802.3AE when CLEAR */
#define AXGMAC_CFG1_RCV_STRICT 0x00000010
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 /* Verify frame length */
#define AXGMAC_CFG1_GEN_FCS 0x00000004 /* Generate FCS */
#define AXGMAC_CFG1_PAD_MASK 0x00000003 /* Mask for pad bits */
#define AXGMAC_CFG1_PAD_64 0x00000001 /* Pad frames to 64 bytes */
/* Detect VLAN and pad to 68 bytes */
#define AXGMAC_CFG1_PAD_VLAN 0x00000002
#define AXGMAC_CFG1_PAD_68 0x00000003 /* Pad to 68 bytes */
/* A-XGMAC Configuration Register 2 */ /* A-XGMAC Configuration Register 2 */
#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 /* Generate single pause frame (test) */ /* Generate single pause frame (test) */
#define AXGMAC_CFG2_LF_MANUAL 0x08000000 /* Manual link fault sequence */ #define AXGMAC_CFG2_GEN_PAUSE 0x80000000
#define AXGMAC_CFG2_LF_AUTO 0x04000000 /* Auto link fault sequence */ /* Manual link fault sequence */
#define AXGMAC_CFG2_LF_REMOTE 0x02000000 /* Remote link fault (READ ONLY) */ #define AXGMAC_CFG2_LF_MANUAL 0x08000000
#define AXGMAC_CFG2_LF_LOCAL 0x01000000 /* Local link fault (READ ONLY) */ /* Auto link fault sequence */
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 /* Inter packet gap */ #define AXGMAC_CFG2_LF_AUTO 0x04000000
/* Remote link fault (READ ONLY) */
#define AXGMAC_CFG2_LF_REMOTE 0x02000000
/* Local link fault (READ ONLY) */
#define AXGMAC_CFG2_LF_LOCAL 0x01000000
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 /* Inter packet gap */
#define AXGMAC_CFG2_IPG_SHIFT 16 #define AXGMAC_CFG2_IPG_SHIFT 16
#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 /* Pause transmit module */ #define AXGMAC_CFG2_PAUSE_XMT 0x00008000 /* Pause transmit module */
#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 /* Enable IPG extension algorithm */ /* Enable IPG extension algorithm */
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F /* IPG extension */ #define AXGMAC_CFG2_IPG_EXTEN 0x00000020
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F /* IPG extension */
/* A-XGMAC Configuration Register 3 */ /* A-XGMAC Configuration Register 3 */
#define AXGMAC_CFG3_RCV_DROP 0xFFFF0000 /* Receive frame drop filter */ /* Receive frame drop filter */
#define AXGMAC_CFG3_RCV_DONT_CARE 0x0000FFFF /* Receive frame don't care filter */ #define AXGMAC_CFG3_RCV_DROP 0xFFFF0000
/* Receive frame don't care filter */
#define AXGMAC_CFG3_RCV_DONT_CARE 0x0000FFFF
/* A-XGMAC Station Address Register - Octets 1-4 */ /* A-XGMAC Station Address Register - Octets 1-4 */
#define AXGMAC_SARLOW_OCTET_ONE 0xFF000000 /* First octet */ #define AXGMAC_SARLOW_OCTET_ONE 0xFF000000 /* First octet */
#define AXGMAC_SARLOW_OCTET_TWO 0x00FF0000 /* Second octet */ #define AXGMAC_SARLOW_OCTET_TWO 0x00FF0000 /* Second octet */
#define AXGMAC_SARLOW_OCTET_THREE 0x0000FF00 /* Third octet */ #define AXGMAC_SARLOW_OCTET_THREE 0x0000FF00 /* Third octet */
#define AXGMAC_SARLOW_OCTET_FOUR 0x000000FF /* Fourth octet */ #define AXGMAC_SARLOW_OCTET_FOUR 0x000000FF /* Fourth octet */
/* A-XGMAC Station Address Register - Octets 5-6 */ /* A-XGMAC Station Address Register - Octets 5-6 */
#define AXGMAC_SARHIGH_OCTET_FIVE 0xFF000000 /* Fifth octet */ #define AXGMAC_SARHIGH_OCTET_FIVE 0xFF000000 /* Fifth octet */
#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 /* Sixth octet */ #define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 /* Sixth octet */
/* A-XGMAC Maximum frame length register */ /* A-XGMAC Maximum frame length register */
#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 /* Maximum transmit frame length */ /* Maximum transmit frame length */
#define AXGMAC_MAXFRAME_XMT 0x3FFF0000
#define AXGMAC_MAXFRAME_XMT_SHIFT 16 #define AXGMAC_MAXFRAME_XMT_SHIFT 16
#define AXGMAC_MAXFRAME_RCV 0x0000FFFF /* Maximum receive frame length */ /* Maximum receive frame length */
#define AXGMAC_MAXFRAME_RCV 0x0000FFFF
/* /*
* This register doesn't need to be written for standard MTU. * This register doesn't need to be written for standard MTU.
* For jumbo, I'll just statically define the value here. This * For jumbo, I'll just statically define the value here. This
...@@ -323,25 +396,32 @@ struct sxg_hw_regs { ...@@ -323,25 +396,32 @@ struct sxg_hw_regs {
/* A-XGMAC AMIIM Command Register */
#define AXGMAC_AMIIM_CMD_START 0x00000008 /* Command start */
#define AXGMAC_AMIIM_CMD_MASK 0x00000007 /* Command */
/* 10/100/1000 Mbps Phy Write */
#define AXGMAC_AMIIM_CMD_LEGACY_WRITE 1
/* 10/100/1000 Mbps Phy Read */
#define AXGMAC_AMIIM_CMD_LEGACY_READ 2
#define AXGMAC_AMIIM_CMD_MONITOR_SINGLE 3 /* Monitor single PHY */
/* Monitor multiple contiguous PHYs */
#define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE 4
/* Present AMIIM Field Reg */
#define AXGMAC_AMIIM_CMD_10G_OPERATION 5
/* Clear Link Fail Bit in MIIM */
#define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL 6
/* A-XGMAC AMIIM Field Register */
#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 /* 2-bit ST field */
#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
#define AXGMAC_AMIIM_FIELD_OP 0x30000000 /* 2-bit OP field */
#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
/* Port address field (hstphyadx in spec) */
#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000
#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
/* Device address field (hstregadx in spec) */
#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000
#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
#define AXGMAC_AMIIM_FIELD_TA 0x00030000 /* 2-bit TA field */
#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF /* Data field */
/* Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register */
#define MIIM_OP_ADDR 0 /* MIIM Address set operation */
...@@ -349,49 +429,76 @@ struct sxg_hw_regs {
#define MIIM_OP_READ 2 /* MIIM Read register operation */
#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
/*
 * Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM
 * Field Register
 */
#define MIIM_PORT_NUM 1 /* All Sahara MIIM modules use port 1 */
/*
 * Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM
 * Field Register
 */
/* PHY PMA/PMD module MIIM device number */
#define MIIM_DEV_PHY_PMA 1
/* PHY PCS module MIIM device number */
#define MIIM_DEV_PHY_PCS 3
/* PHY XS module MIIM device number */
#define MIIM_DEV_PHY_XS 4
#define MIIM_DEV_XGXS 5 /* XGXS MIIM device number */
/*
 * Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field
 * Register
 */
#define MIIM_TA_10GB 2 /* set to 2 for 10 GB operation */
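/*
 * Illustrative sketch only (not part of this patch): how the mask/shift
 * pairs above are meant to combine into a single AMIIM Field Register
 * value for a clause-45 address cycle.  The helper name sxg_amiim_field()
 * is made up for this example; dev_addr is one of the MIIM_DEV_* values
 * and reg_addr is the PHY register address to load into the data field.
 */
static inline u32 sxg_amiim_field(u32 dev_addr, u32 reg_addr)
{
	return (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	       (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	       (dev_addr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	       (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) |
	       (reg_addr & AXGMAC_AMIIM_FIELD_DATA);
}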
/* A-XGMAC AMIIM Configuration Register */
/* Bypass preamble of mngmt frame */
#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080
/* half-clock duration of MDC output */
#define AXGMAC_AMIIM_CFG_HALF_CLOCK 0x0000007F
/* A-XGMAC AMIIM Indicator Register */
/* Link status from legacy PHY or MMD */
#define AXGMAC_AMIIM_INDC_LINK 0x00000010
/* Multiple phy operation in progress */
#define AXGMAC_AMIIM_INDC_MPHY 0x00000008
/* Single phy operation in progress */
#define AXGMAC_AMIIM_INDC_SPHY 0x00000004
/* Single or multiple monitor cmd */
#define AXGMAC_AMIIM_INDC_MON 0x00000002
/* Set until cmd operation complete */
#define AXGMAC_AMIIM_INDC_BUSY 0x00000001
/* Link Status and Control Register */
#define LS_PHY_CLR_RESET 0x80000000 /* Clear reset signal to PHY */
#define LS_SERDES_POWER_DOWN 0x40000000 /* Power down the Sahara Serdes */
#define LS_XGXS_ENABLE 0x20000000 /* Enable the XAUI XGXS logic */
/* Hold XAUI XGXS logic reset until Serdes is up */
#define LS_XGXS_CTL 0x10000000
/* When 0, XAUI Serdes is up and initialization is complete */
#define LS_SERDES_DOWN 0x08000000
/* When 0, Trace Serdes is up and initialization is complete */
#define LS_TRACE_DOWN 0x04000000
/* Set PHY clock to 25 MHz (else 156.125 MHz) */
#define LS_PHY_CLK_25MHZ 0x02000000
#define LS_PHY_CLK_EN 0x01000000 /* Enable clock to PHY */
#define LS_XAUI_LINK_UP 0x00000010 /* XAUI link is up */
/* XAUI link status has changed */
#define LS_XAUI_LINK_CHNG 0x00000008
#define LS_LINK_ALARM 0x00000004 /* Link alarm pin */
/* Mask link attention control bits */
#define LS_ATTN_CTRL_MASK 0x00000003
#define LS_ATTN_ALARM 0x00000000 /* 00 => Attn on link alarm */
/* 01 => Attn on link alarm or status change */
#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001
/* 10 => Attn on link status change */
#define LS_ATTN_STAT_CHNG 0x00000002
#define LS_ATTN_NONE 0x00000003 /* 11 => no Attn */
/* Link Address High Registers */
#define LINK_ADDR_ENABLE 0x80000000 /* Enable this link address */
/*
...@@ -404,12 +511,12 @@ struct sxg_hw_regs {
#define XGXS_ADDRESS_STATUS1 0x0001 /* XS Status 1 */
#define XGXS_ADDRESS_DEVID_LOW 0x0002 /* XS Device ID (low) */
#define XGXS_ADDRESS_DEVID_HIGH 0x0003 /* XS Device ID (high) */
#define XGXS_ADDRESS_SPEED 0x0004 /* XS Speed ability */
#define XGXS_ADDRESS_DEV_LOW 0x0005 /* XS Devices in package */
#define XGXS_ADDRESS_DEV_HIGH 0x0006 /* XS Devices in package */
#define XGXS_ADDRESS_STATUS2 0x0008 /* XS Status 2 */
#define XGXS_ADDRESS_PKGID_lOW 0x000E /* XS Package Identifier */
#define XGXS_ADDRESS_PKGID_HIGH 0x000F /* XS Package Identifier */
#define XGXS_ADDRESS_LANE_STATUS 0x0018 /* 10G XGXS Lane Status */
#define XGXS_ADDRESS_TEST_CTRL 0x0019 /* 10G XGXS Test Control */
#define XGXS_ADDRESS_RESET_LO1 0x8000 /* Vendor-Specific Reset Lo 1 */
...@@ -423,7 +530,8 @@ struct sxg_hw_regs {
#define XGXS_CONTROL1_SPEED1 0x2000 /* 0 = unspecified, 1 = 10Gb+ */
#define XGXS_CONTROL1_LOWPOWER 0x0400 /* 1 = Low power mode */
#define XGXS_CONTROL1_SPEED2 0x0040 /* Same as SPEED1 (?) */
/* Everything reserved except zero (?) */
#define XGXS_CONTROL1_SPEED 0x003C
/* XS Status 1 register bit definitions */
#define XGXS_STATUS1_FAULT 0x0080 /* Fault detected */
...@@ -439,7 +547,7 @@ struct sxg_hw_regs {
#define XGXS_DEVICES_PCS 0x0008 /* PCS Present */
#define XGXS_DEVICES_WIS 0x0004 /* WIS Present */
#define XGXS_DEVICES_PMD 0x0002 /* PMD/PMA Present */
#define XGXS_DEVICES_CLAUSE22 0x0001 /* Clause 22 registers present */
/* XS Devices High register bit definitions */
#define XGXS_DEVICES_VENDOR2 0x8000 /* Vendor specific device 2 */
...@@ -457,7 +565,7 @@ struct sxg_hw_regs {
#define XGXS_PKGID_HIGH_REV 0x000F /* Revision Number */
/* XS Lane Status register bit definitions */
#define XGXS_LANE_PHY 0x1000 /* PHY/DTE lane alignment status */
#define XGXS_LANE_PATTERN 0x0800 /* Pattern testing ability */
#define XGXS_LANE_LOOPBACK 0x0400 /* PHY loopback ability */
#define XGXS_LANE_SYNC3 0x0008 /* Lane 3 sync */
...@@ -478,7 +586,10 @@ struct sxg_hw_regs {
 *
 * Full register descriptions can be found in PHY/XENPAK/IEEE specs
 */
/*
 * LASI (Link Alarm Status Interrupt) Registers (located in
 * MIIM_DEV_PHY_PMA device)
 */
#define LASI_RX_ALARM_CONTROL 0x9000 /* LASI RX_ALARM Control */
#define LASI_TX_ALARM_CONTROL 0x9001 /* LASI TX_ALARM Control */
#define LASI_CONTROL 0x9002 /* LASI Control */
...@@ -487,9 +598,12 @@ struct sxg_hw_regs {
#define LASI_STATUS 0x9005 /* LASI Status */
/* LASI_CONTROL bit definitions */
/* Enable RX_ALARM interrupts */
#define LASI_CTL_RX_ALARM_ENABLE 0x0004
/* Enable TX_ALARM interrupts */
#define LASI_CTL_TX_ALARM_ENABLE 0x0002
/* Enable Link Status interrupts */
#define LASI_CTL_LS_ALARM_ENABLE 0x0001
/* LASI_STATUS bit definitions */
#define LASI_STATUS_RX_ALARM 0x0004 /* RX_ALARM status */
...@@ -499,7 +613,7 @@ struct sxg_hw_regs {
/* PHY registers - PMA/PMD (device 1) */
#define PHY_PMA_CONTROL1 0x0000 /* PMA/PMD Control 1 */
#define PHY_PMA_STATUS1 0x0001 /* PMA/PMD Status 1 */
#define PHY_PMA_RCV_DET 0x000A /* PMA/PMD Receive Signal Detect */
/* other PMA/PMD registers exist and can be defined as needed */
/* PHY registers - PCS (device 3) */
...@@ -518,10 +632,10 @@ struct sxg_hw_regs {
#define PMA_CONTROL1_RESET 0x8000 /* PMA/PMD reset */
/* PHY_PMA_RCV_DET register bit definitions */
#define PMA_RCV_DETECT 0x0001 /* PMA/PMD receive signal detect */
/* PHY_PCS_10G_STATUS1 register bit definitions */
#define PCS_10B_BLOCK_LOCK 0x0001 /* PCS 10GBASE-R locked to receive blocks */
/* PHY_XS_LANE_STATUS register bit definitions */
#define XS_LANE_ALIGN 0x1000 /* XS transmit lanes aligned */
...@@ -559,13 +673,20 @@ struct phy_ucode {
#pragma pack(push, 1)
struct xmt_desc {
	ushort XmtLen;		/* word 0, bits [15:0] - transmit length */
	/* word 0, bits [23:16] - transmit control byte */
	unsigned char XmtCtl;
	/* word 0, bits [31:24] - transmit command plus misc. */
	unsigned char Cmd;
	/* word 1, bits [31:0] - transmit buffer ID */
	u32 XmtBufId;
	/* word 2, bits [7:0] - byte address of TCP header */
	unsigned char TcpStrt;
	/* word 2, bits [15:8] - byte address of IP header */
	unsigned char IpStrt;
	/* word 2, bits [31:16] - partial IP checksum */
	ushort IpCkSum;
	/* word 3, bits [15:0] - partial TCP checksum */
	ushort TcpCkSum;
	ushort Rsvd1;		/* word 3, bits [31:16] - PAD */
	u32 Rsvd2;		/* word 4, bits [31:0] - PAD */
	u32 Rsvd3;		/* word 5, bits [31:0] - PAD */
...@@ -580,42 +701,58 @@ struct xmt_desc {
#define XMT_DESC_CMD_CSUM_INSERT 1 /* checksum insert descriptor */
#define XMT_DESC_CMD_FORMAT 2 /* format descriptor */
#define XMT_DESC_CMD_PRIME 3 /* prime descriptor */
/* command code shift (shift to bits [31:30] in word 0) */
#define XMT_DESC_CMD_CODE_SHFT 6
/* shifted command codes */
#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
#define XMT_CSUM_INSERT (XMT_DESC_CMD_CSUM_INSERT << XMT_DESC_CMD_CODE_SHFT)
#define XMT_FORMAT (XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT)
#define XMT_PRIME (XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT)
/*
 * struct xmt_desc Control Byte (XmtCtl) definitions
 * NOTE: These bits do not work on Sahara (Rev A)!
 */
/* current frame is a pause control frame (for statistics) */
#define XMT_CTL_PAUSE_FRAME 0x80
/* current frame is a control frame (for statistics) */
#define XMT_CTL_CONTROL_FRAME 0x40
#define XMT_CTL_PER_PKT_QUAL 0x20 /* per packet qualifier */
#define XMT_CTL_PAD_MODE_NONE 0x00 /* do not pad frame */
#define XMT_CTL_PAD_MODE_64 0x08 /* pad frame to 64 bytes */
/* pad frame to 64 bytes, and VLAN frames to 68 bytes */
#define XMT_CTL_PAD_MODE_VLAN_68 0x10
#define XMT_CTL_PAD_MODE_68 0x18 /* pad frame to 68 bytes */
/* generate FCS (CRC) for this frame */
#define XMT_CTL_GEN_FCS 0x04
#define XMT_CTL_DELAY_FCS_0 0x00 /* do not delay FCS calculation */
/* delay FCS calculation by 1 (4-byte) word */
#define XMT_CTL_DELAY_FCS_1 0x01
/* delay FCS calculation by 2 (4-byte) words */
#define XMT_CTL_DELAY_FCS_2 0x02
/* delay FCS calculation by 3 (4-byte) words */
#define XMT_CTL_DELAY_FCS_3 0x03
/* struct xmt_desc XmtBufId definition */
/*
 * The Xmt buffer ID is formed by dividing the buffer (DRAM) address
 * by 256 (or << 8)
 */
#define XMT_BUF_ID_SHFT 8
/* Receiver Sequencer Definitions */
/* Receive Event Queue (queues 3 - 6) bit definitions */
/* bit mask for the Receive Buffer ID */
#define RCV_EVTQ_RBFID_MASK 0x0000FFFF
/* Receive Buffer ID definition */
/*
 * The Rcv buffer ID is formed by dividing the buffer (DRAM) address
 * by 32 (or << 5)
 */
#define RCV_BUF_ID_SHFT 5
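/*
 * Illustrative sketch only (not part of this patch) of the address/ID
 * relationship described in the two comments above.  The helper names
 * are made up for this example.
 */
static inline u32 sxg_xmt_buf_id(u32 dram_addr)
{
	return dram_addr >> XMT_BUF_ID_SHFT;	/* buffer address / 256 */
}

static inline u32 sxg_rcv_buf_id(u32 dram_addr)
{
	return dram_addr >> RCV_BUF_ID_SHFT;	/* buffer address / 32 */
}

static inline u32 sxg_rcv_event_buf_id(u32 rcv_event)
{
	/* queues 3 - 6 carry the Receive Buffer ID in the low 16 bits */
	return rcv_event & RCV_EVTQ_RBFID_MASK;
}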
/*
 * Format of the 18 byte Receive Buffer returned by the
...@@ -628,73 +765,86 @@ struct rcv_buf_hdr {
	union {
		ushort TcpCsum;		/* TCP checksum */
		struct {
			/* lower 8 bits of the TCP checksum */
			unsigned char TcpCsumL;
			/* Link hash (multicast frames only) */
			unsigned char LinkHash;
		};
	};
	ushort SktHash;			/* Socket hash */
	unsigned char TcpHdrOffset;	/* TCP header offset into packet */
	unsigned char IpHdrOffset;	/* IP header offset into packet */
	u32 TpzHash;			/* Toeplitz hash */
	ushort Reserved;		/* Reserved */
};
#pragma pack(pop)
/* Queue definitions */
/* Ingress (read only) queue numbers */
#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
#define HST_EVT_Q 1 /* Host Event Queue */
#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
/* Local (read/write) queue numbers */
#define LOCAL_A_Q 8 /* Spare local Queue */
#define LOCAL_B_Q 9 /* Spare local Queue */
#define LOCAL_C_Q 10 /* Spare local Queue */
#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
/* Egress (write only) queue numbers */
#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
#define RCV_BUF_Q 26 /* Receive Buffer Queue */
/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
/* enable copy of xmt descriptor to xmt command queue */
#define PXY_COPY_EN 0x00200000
#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
/* SXG EEPROM/Flash Configuration Definitions */
/* Location of configuration data in EEPROM or Flash */
/* start addr for config info in EEPROM */
#define EEPROM_CONFIG_START_ADDR 0x00
/* start addr for config info in Flash */
#define FLASH_CONFIG_START_ADDR 0x80
/* Configuration data section defines */
#define HW_CFG_SECTION_SIZE 512 /* size of H/W section */
#define HW_CFG_SECTION_SIZE_A 256 /* size of H/W section (Sahara rev A) */
/* starting location (offset) of S/W section */
#define SW_CFG_SECTION_START 512
/* starting location (offset) of S/W section (Sahara rev A) */
#define SW_CFG_SECTION_START_A 256
#define SW_CFG_SECTION_SIZE 128 /* size of S/W section */
/*
 * H/W configuration data magic word.  Goes in Addr field of first
 * struct hw_cfg_data entry
 */
#define HW_CFG_MAGIC_WORD 0xA5A5
/*
 * H/W configuration data terminator.  Goes in Addr field of last
 * struct hw_cfg_data entry
 */
#define HW_CFG_TERMINATOR 0xFFFF
#define SW_CFG_MAGIC_WORD 0x5A5A /* S/W configuration data magic word */
#pragma pack(push, 1)
...@@ -703,21 +853,23 @@ struct rcv_buf_hdr {
 * Read by the Sahara hardware
 */
struct hw_cfg_data {
	ushort Addr;
	ushort Data;
};
/*
 * Number of struct hw_cfg_data structures to put in the configuration
 * data structure (struct sxg_config or struct sxg_config_a). The number is
 * computed to fill the entire H/W config section of the structure.
 */
#define NUM_HW_CFG_ENTRIES \
	(HW_CFG_SECTION_SIZE / sizeof(struct hw_cfg_data))
#define NUM_HW_CFG_ENTRIES_A \
	(HW_CFG_SECTION_SIZE_A / sizeof(struct hw_cfg_data))
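/*
 * Illustrative sketch only (not part of this patch): how a H/W
 * configuration table is expected to be laid out according to the
 * comments above.  The first entry carries the magic word in its Addr
 * field and the last entry carries the terminator; the address/data
 * pairs in between are placeholders, and a real table would be padded
 * out to NUM_HW_CFG_ENTRIES entries.
 */
static const struct hw_cfg_data example_hw_cfg[] = {
	{ HW_CFG_MAGIC_WORD, 0x0000 },	/* magic word in Addr of first entry */
	/* ... register address/data pairs read by the hardware ... */
	{ HW_CFG_TERMINATOR, 0x0000 },	/* terminator in Addr of last entry */
};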
/* MAC address structure */
struct sxg_config_mac {
	unsigned char MacAddr[6];	/* MAC Address */
};
/* FRU data structure */
...@@ -800,10 +952,13 @@ struct sxg_config_a {
 * at compile time.
 */
compile_time_assert (offsetof(struct sxg_config, SwCfg) == SW_CFG_SECTION_START);
compile_time_assert (sizeof(struct sxg_config) == HW_CFG_SECTION_SIZE
			+ SW_CFG_SECTION_SIZE);
compile_time_assert (offsetof(struct sxg_config_a, SwCfg)
			== SW_CFG_SECTION_START_A);
compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A
			+ SW_CFG_SECTION_SIZE);
#endif
/*
 * Structure used to pass information between driver and user-mode
...@@ -811,10 +966,11 @@ compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A + SW_C
 */
struct adapt_userinfo {
	bool LinkUp;
	/* use LinkUp - any need for other states? */
	/* u32 LinkState; */
	u32 LinkSpeed;		/* not currently needed */
	u32 LinkDuplex;		/* not currently needed */
	u32 Port;		/* not currently needed */
	u32 PhysPort;		/* not currently needed */
	ushort PciLanes;
	unsigned char MacAddr[6];
...@@ -837,11 +993,16 @@ enum ASIC_TYPE{
/* Sahara (ASIC level) defines */
#define SAHARA_GRAM_SIZE 0x020000 /* GRAM size - 128 KB */
#define SAHARA_DRAM_SIZE 0x200000 /* DRAM size - 2 MB */
/* QRAM size - 16K entries (64 KB) */
#define SAHARA_QRAM_SIZE 0x004000
/* WCS - 8K instructions (x 108 bits) */
#define SAHARA_WCS_SIZE 0x002000
/* Arabia (board level) defines */
#define FLASH_SIZE 0x080000 /* 512 KB (4 Mb) */
/* EEPROM size (bytes), including xfmr area */
#define EEPROM_SIZE_XFMR 1024
/* EEPROM size excluding xfmr area (512 + 128) */
#define EEPROM_SIZE_NO_XFMR 640
/* EEPROM size for Sahara rev A */
#define EEPROM_SIZE_REV_A 512
...@@ -14,7 +14,10 @@
 * type of transceiver.
 */
/*
 * Download for AEL2005C PHY with SR/LR transceiver
 * (10GBASE-SR or 10GBASE-LR)
 */
static struct phy_ucode PhyUcode[] = {
	/*
	 * NOTE: An address of 0 is a special case. When the download routine
...