Commit ddd6f0a8 authored by Mithlesh Thukral, committed by Greg Kroah-Hartman

Staging: sxg: Commenting style fixes - Pending work

This patch cleans up the comments and converts them to C89 style.
It fixes a comment-related TODO item.
Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com>
Signed-off-by: Christopher Harrer <charrer@alacritech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 942798b4
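For reference, the comment-style conversion applied throughout this patch follows the pattern sketched below. The identifiers and comment text here are made up for illustration and do not come from the sxg driver; the point is the formatting: per-line /* ... */ (and C99 //) comments become a single C89 block comment with one leading asterisk per continuation line, which is the multi-line comment form preferred by the kernel coding style.

/* Old style being removed: one pair of comment delimiters per line. */
/* Poll the status register until the hardware reports ready,        */
/* then give up after a fixed number of attempts.                    */

/*
 * New C89 block style being introduced: a single opening delimiter,
 * one leading asterisk per continuation line, and a single closing
 * delimiter on its own line.
 */
static inline int example_poll(volatile unsigned int *status_reg)
{
	/* Hypothetical helper, present only to anchor the comments above. */
	return *status_reg != 0;
}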
@@ -171,12 +171,6 @@ static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
/***********************************************************************
************************************************************************
************************************************************************
************************************************************************
************************************************************************/
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
writel(value, reg);
@@ -278,10 +272,12 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
/* First, reset the card */
WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
/* Download each section of the microcode as specified in */
/* its download file. The *download.c file is generated using */
/* the saharaobjtoc facility which converts the metastep .obj */
/* file to a .c file which contains a two dimentional array. */
/*
* Download each section of the microcode as specified in
* its download file. The *download.c file is generated using
* the saharaobjtoc facility which converts the metastep .obj
* file to a .c file which contains a two-dimensional array.
*/
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: SECTION # %d\n", Section);
switch (UcodeSel) {
@@ -309,19 +305,23 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
/* Write instruction address with the WRITE bit set */
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_WRITE), FLUSH);
/* Sahara bug in the ucode download logic - the write to DataLow */
/* for the next instruction could get corrupted. To avoid this, */
/* write to DataLow again for this instruction (which may get */
/* corrupted, but it doesn't matter), then increment the address */
/* and write the data for the next instruction to DataLow. That */
/* write should succeed. */
/*
* Sahara bug in the ucode download logic - the write to DataLow
* for the next instruction could get corrupted. To avoid this,
* write to DataLow again for this instruction (which may get
* corrupted, but it doesn't matter), then increment the address
* and write the data for the next instruction to DataLow. That
* write should succeed.
*/
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
/* Advance 3 u32S to start of next instruction */
Instruction += 3;
}
}
/* Now repeat the entire operation reading the instruction back and */
/* checking for parity errors */
/*
* Now repeat the entire operation reading the instruction back and
* checking for parity errors
*/
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: check SECTION # %d\n", Section);
switch (UcodeSel) {
@@ -376,8 +376,10 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
/* Everything OK, Go. */
WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
/* Poll the CardUp register to wait for microcode to initialize */
/* Give up after 10,000 attemps (500ms). */
/*
* Poll the CardUp register to wait for microcode to initialize.
* Give up after 10,000 attempts (500ms).
*/
for (i = 0; i < 10000; i++) {
udelay(50);
READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
@@ -391,9 +393,11 @@ static bool sxg_download_microcode(struct adapter_t *adapter, enum SXG_UCODE_SEL
return (FALSE); /* Timeout */
}
/* Now write the LoadSync register. This is used to */
/* synchronize with the card so it can scribble on the memory */
/* that contained 0xCAFE from the "CardUp" step above */
/*
* Now write the LoadSync register. This is used to
* synchronize with the card so it can scribble on the memory
* that contained 0xCAFE from the "CardUp" step above
*/
if (UcodeSel == SXG_UCODE_SAHARA) {
WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
}
@@ -449,21 +453,27 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
InitializeListHead(&adapter->FreeSglBuffers);
InitializeListHead(&adapter->AllSglBuffers);
/* Mark these basic allocations done. This flags essentially */
/* tells the SxgFreeResources routine that it can grab spinlocks */
/* and reference listheads. */
/*
* Mark these basic allocations done. This flags essentially
* tells the SxgFreeResources routine that it can grab spinlocks
* and reference listheads.
*/
adapter->BasicAllocations = TRUE;
/* Main allocation loop. Start with the maximum supported by */
/* the microcode and back off if memory allocation */
/* fails. If we hit a minimum, fail. */
/*
* Main allocation loop. Start with the maximum supported by
* the microcode and back off if memory allocation
* fails. If we hit a minimum, fail.
*/
for (;;) {
DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
(unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
/* Start with big items first - receive and transmit rings. At the moment */
/* I'm going to keep the ring size fixed and adjust the number of */
/* TCBs if we fail. Later we might consider reducing the ring size as well.. */
/*
* Start with big items first - receive and transmit rings. At the moment
* I'm going to keep the ring size fixed and adjust the number of
* TCBs if we fail. Later we might consider reducing the ring size as well.
*/
adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
sizeof(struct sxg_xmt_ring) *
1,
@@ -518,8 +528,10 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
/* Allocate receive data buffers. We allocate a block of buffers and */
/* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */
/*
* Allocate receive data buffers. We allocate a block of buffers and
* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
*/
for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
sxg_allocate_buffer_memory(adapter,
@@ -527,8 +539,10 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
/* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
/* doesn't return status. Make sure we got the number of buffers we requested */
/*
* NBL resource allocation can fail in the 'AllocateComplete' routine, which
* doesn't return status. Make sure we got the number of buffers we requested
*/
if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -597,7 +611,6 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
*
* Arguments -
* pcidev - A pointer to our adapter structure
*
*/
static void sxg_config_pci(struct pci_dev *pcidev)
{
@@ -628,7 +641,7 @@ static unsigned char temp_mac_address[6] = { 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69
*/
static inline int sxg_read_config(struct adapter_t *adapter)
{
//struct sxg_config data;
/* struct sxg_config data; */
struct sw_cfg_data *data;
dma_addr_t p_addr;
unsigned long status;
@@ -636,7 +649,8 @@ static inline int sxg_read_config(struct adapter_t *adapter)
data = pci_alloc_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), &p_addr);
if(!data) {
/* We cant get even this much memory. Raise a hell
/*
* We can't get even this much memory. Raise hell and
* get out of here
*/
printk(KERN_ERR"%s : Could not allocate memory for reading EEPROM\n", __FUNCTION__);
@@ -797,8 +811,10 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->UcodeRegs = (void *)memmapped_ioaddr;
adapter->State = SXG_STATE_INITIALIZING;
/* Maintain a list of all adapters anchored by */
/* the global SxgDriver structure. */
/*
* Maintain a list of all adapters anchored by
* the global SxgDriver structure.
*/
adapter->Next = SxgDriver.Adapters;
SxgDriver.Adapters = adapter;
adapter->AdapterID = ++SxgDriver.AdapterID;
@@ -816,10 +832,12 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
}
/* status = SXG_READ_EEPROM(adapter); */
/* if (!status) { */
/* goto sxg_init_bad; */
/* } */
/*
* status = SXG_READ_EEPROM(adapter);
* if (!status) {
* goto sxg_init_bad;
* }
*/
DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
sxg_config_pci(pcidev);
@@ -911,10 +929,8 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
return -ENODEV;
}
/***********************************************************************
* LINE BASE Interrupt routines..
***********************************************************************/
/*
* LINE BASE Interrupt routines..
*
* sxg_disable_interrupt
*
@@ -934,9 +950,7 @@ static void sxg_disable_interrupt(struct adapter_t *adapter)
/* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
/* */
/* Turn off interrupts by writing to the icr register. */
/* */
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
adapter->InterruptsEnabled = 0;
@@ -946,7 +960,6 @@ static void sxg_disable_interrupt(struct adapter_t *adapter)
}
/*
*
* sxg_enable_interrupt
*
* EnableInterrupt Handler
@@ -965,9 +978,7 @@ static void sxg_enable_interrupt(struct adapter_t *adapter)
/* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
/* */
/* Turn on interrupts by writing to the icr register. */
/* */
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
adapter->InterruptsEnabled = 1;
@@ -977,7 +988,6 @@ static void sxg_enable_interrupt(struct adapter_t *adapter)
}
/*
*
* sxg_isr - Process an line-based interrupt
*
* Arguments:
@@ -996,25 +1006,29 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
adapter->Stats.NumInts++;
if (adapter->Isr[0] == 0) {
/* The SLIC driver used to experience a number of spurious interrupts */
/* due to the delay associated with the masking of the interrupt */
/* (we'd bounce back in here). If we see that again with Sahara, */
/* add a READ_REG of the Icr register after the WRITE_REG below. */
/*
* The SLIC driver used to experience a number of spurious
* interrupts due to the delay associated with the masking of
* the interrupt (we'd bounce back in here). If we see that
* again with Sahara, add a READ_REG of the Icr register after
* the WRITE_REG below.
*/
adapter->Stats.FalseInts++;
return IRQ_NONE;
}
/* */
/* Move the Isr contents and clear the value in */
/* shared memory, and mask interrupts */
/* */
/*
* Move the Isr contents and clear the value in
* shared memory, and mask interrupts
*/
adapter->IsrCopy[0] = adapter->Isr[0];
adapter->Isr[0] = 0;
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
/* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO /* RSS Stuff */
/* If RSS is enabled and the ISR specifies */
/* SXG_ISR_EVENT, then schedule DPC's */
/* based on event queues. */
/*
* If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
* schedule DPC's based on event queues.
*/
if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
for (i = 0;
i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -1030,8 +1044,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
}
}
}
/* Now, either schedule the CPUs specified by the CpuMask, */
/* or queue default */
/*
* Now, either schedule the CPUs specified by the CpuMask,
* or queue default
*/
if (CpuMask) {
*QueueDefault = FALSE;
} else {
@@ -1040,9 +1055,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
}
*TargetCpus = CpuMask;
#endif
/* */
/* There are no DPCs in Linux, so call the handler now */
/* */
sxg_handle_interrupt(adapter);
return IRQ_HANDLED;
@@ -1065,7 +1078,6 @@ static void sxg_handle_interrupt(struct adapter_t *adapter)
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
ASSERT(adapter->IsrCopy[0]);
/*/////////////////////////// */
/* Always process the event queue. */
sxg_process_event_queue(adapter,
@@ -1080,13 +1092,9 @@ static void sxg_handle_interrupt(struct adapter_t *adapter)
return;
}
#endif
/* */
/* Last (or only) DPC processes the ISR and clears the interrupt. */
/* */
NewIsr = sxg_process_isr(adapter, 0);
/* */
/* Reenable interrupts */
/* */
adapter->IsrCopy[0] = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
adapter, NewIsr, 0, 0);
@@ -1103,7 +1111,6 @@ static void sxg_handle_interrupt(struct adapter_t *adapter)
}
/*
*
* sxg_process_isr - Process an interrupt. Called from the line-based and
* message based interrupt DPC routines
*
@@ -1122,6 +1129,7 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
adapter, Isr, 0, 0);
DBG_ERROR("%s: Entering with %d ISR value\n", __FUNCTION__, Isr);
/* Error */
if (Isr & SXG_ISR_ERR) {
if (Isr & SXG_ISR_PDQF) {
@@ -1130,12 +1138,14 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
}
/* No host buffer */
if (Isr & SXG_ISR_RMISS) {
/* There is a bunch of code in the SLIC driver which */
/* attempts to process more receive events per DPC */
/* if we start to fall behind. We'll probably */
/* need to do something similar here, but hold */
/* off for now. I don't want to make the code more */
/* complicated than strictly needed. */
/*
* There is a bunch of code in the SLIC driver which
* attempts to process more receive events per DPC
* if we start to fall behind. We'll probably
* need to do something similar here, but hold
* off for now. I don't want to make the code more
* complicated than strictly needed.
*/
adapter->Stats.RcvNoBuffer++;
if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
@@ -1155,10 +1165,12 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
}
/* Event ring full */
if (Isr & SXG_ISR_ERFULL) {
/* Same issue as RMISS, really. This means the */
/* host is falling behind the card. Need to increase */
/* event ring size, process more events per interrupt, */
/* and/or reduce/remove interrupt aggregation. */
/*
* Same issue as RMISS, really. This means the
* host is falling behind the card. Need to increase
* event ring size, process more events per interrupt,
* and/or reduce/remove interrupt aggregation.
*/
adapter->Stats.EventRingFull++;
DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
__func__);
@@ -1185,9 +1197,11 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
}
/* Debug - breakpoint hit */
if (Isr & SXG_ISR_BREAK) {
/* At the moment AGDB isn't written to support interactive */
/* debug sessions. When it is, this interrupt will be used */
/* to signal AGDB that it has hit a breakpoint. For now, ASSERT. */
/*
* At the moment AGDB isn't written to support interactive
* debug sessions. When it is, this interrupt will be used
* to signal AGDB that it has hit a breakpoint. For now, ASSERT.
*/
ASSERT(0);
}
/* Heartbeat response */
@@ -1201,7 +1215,6 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
}
/*
*
* sxg_process_event_queue - Process our event queue
*
* Arguments:
@@ -1230,14 +1243,18 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
(adapter->State == SXG_STATE_PAUSING) ||
(adapter->State == SXG_STATE_PAUSED) ||
(adapter->State == SXG_STATE_HALTING));
/* We may still have unprocessed events on the queue if */
/* the card crashed. Don't process them. */
/*
* We may still have unprocessed events on the queue if
* the card crashed. Don't process them.
*/
if (adapter->Dead) {
return (0);
}
/* In theory there should only be a single processor that */
/* accesses this queue, and only at interrupt-DPC time. So */
/* we shouldn't need a lock for any of this. */
/*
* In theory there should only be a single processor that
* accesses this queue, and only at interrupt-DPC time. So
* we shouldn't need a lock for any of this.
*/
while (Event->Status & EVENT_STATUS_VALID) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
Event, Event->Code, Event->Status,
@@ -1245,10 +1262,8 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
switch (Event->Code) {
case EVENT_CODE_BUFFERS:
ASSERT(!(Event->CommandIndex & 0xFF00)); /* struct sxg_ring_info Head & Tail == unsigned char */
/* */
sxg_complete_descriptor_blocks(adapter,
Event->CommandIndex);
/* */
break;
case EVENT_CODE_SLOWRCV:
--adapter->RcvBuffersOnCard;
@@ -1258,8 +1273,11 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
/* Add it to our indication list */
SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
IndicationList, num_skbs);
/* In Linux, we just pass up each skb to the protocol above at this point, */
/* there is no capability of an indication list. */
/*
* In Linux, we just pass up each skb to the
* protocol above at this point, there is no
* capability of an indication list.
*/
#else
/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
@@ -1278,29 +1296,36 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
__func__, Event->Code);
/* ASSERT(0); */
}
/* See if we need to restock card receive buffers. */
/* There are two things to note here: */
/* First - This test is not SMP safe. The */
/* adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
/* we do not protect it with respect to these tests. The only way to do that */
/* is with a lock, and I don't want to grab a lock every time we adjust the */
/* BuffersOnCard count. Instead, we allow the buffer replenishment to be off */
/* once in a while. The worst that can happen is the card is given one */
/* more-or-less descriptor block than the arbitrary value we've chosen. */
/* No big deal */
/* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
/* Second - We expect this test to rarely evaluate to true. We attempt to */
/* refill descriptor blocks as they are returned to us */
/* (sxg_complete_descriptor_blocks), so The only time this should evaluate */
/* to true is when sxg_complete_descriptor_blocks failed to allocate */
/* receive buffers. */
/*
* See if we need to restock card receive buffers.
* There are two things to note here:
* First - This test is not SMP safe. The
* adapter->BuffersOnCard field is protected via atomic
* interlocked calls, but we do not protect it with respect
* to these tests. The only way to do that is with a lock,
* and I don't want to grab a lock every time we adjust the
* BuffersOnCard count. Instead, we allow the buffer
* replenishment to be off once in a while. The worst that
* can happen is the card is given one more-or-less descriptor
* block than the arbitrary value we've chosen. No big deal.
* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
* is adjusted.
* Second - We expect this test to rarely
* evaluate to true. We attempt to refill descriptor blocks
* as they are returned to us (sxg_complete_descriptor_blocks),
* so the only time this should evaluate to true is when
* sxg_complete_descriptor_blocks failed to allocate
* receive buffers.
*/
if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
sxg_stock_rcv_buffers(adapter);
}
/* It's more efficient to just set this to zero. */
/* But clearing the top bit saves potential debug info... */
/*
* It's more efficient to just set this to zero.
* But clearing the top bit saves potential debug info...
*/
Event->Status &= ~EVENT_STATUS_VALID;
/* Advanct to the next event */
/* Advance to the next event */
SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
Event = &EventRing->Ring[adapter->NextEvent[RssId]];
EventsProcessed++;
@@ -1309,9 +1334,11 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EVENT_RING_BATCH, FALSE);
EventsProcessed = 0;
/* If we've processed our batch limit, break out of the */
/* loop and return SXG_ISR_EVENT to arrange for us to */
/* be called again */
/*
* If we've processed our batch limit, break out of the
* loop and return SXG_ISR_EVENT to arrange for us to
* be called again
*/
if (Batches++ == EVENT_BATCH_LIMIT) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_NOISY, "EvtLimit", Batches,
@@ -1322,14 +1349,10 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
}
}
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
/* */
/* Indicate any received dumb-nic frames */
/* */
SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
#endif
/* */
/* Release events back to the card. */
/* */
if (EventsProcessed) {
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EventsProcessed, FALSE);
@@ -1356,16 +1379,20 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
u32 *ContextType;
struct sxg_cmd *XmtCmd;
/* NOTE - This lock is dropped and regrabbed in this loop. */
/* This means two different processors can both be running */
/* through this loop. Be *very* careful. */
/*
* NOTE - This lock is dropped and regrabbed in this loop.
* This means two different processors can both be running
* through this loop. Be *very* careful.
*/
spin_lock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
/* Locate the current Cmd (ring descriptor entry), and */
/* associated SGL, and advance the tail */
/*
* Locate the current Cmd (ring descriptor entry), and
* associated SGL, and advance the tail
*/
SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
ASSERT(ContextType);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
@@ -1390,10 +1417,12 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
ASSERT(adapter->Stats.XmtQLen);
adapter->Stats.XmtQLen--; /* within XmtZeroLock */
adapter->Stats.XmtOk++;
/* Now drop the lock and complete the send back to */
/* Microsoft. We need to drop the lock because */
/* Microsoft can come back with a chimney send, which */
/* results in a double trip in SxgTcpOuput */
/*
* Now drop the lock and complete the send back to
* Microsoft. We need to drop the lock because
* Microsoft can come back with a chimney send, which
* results in a double trip in SxgTcpOutput
*/
spin_unlock(&adapter->XmtZeroLock);
SXG_COMPLETE_DUMB_SEND(adapter, skb);
/* and reacquire.. */
@@ -1452,7 +1481,7 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
for (i = 0; i < 32; i++)
dptr += sprintf(dptr, "%02x ", (unsigned)data[i]);
printk("ASK:sxg_slow_receive: data %s\n", dstr);
//memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), RcvDataBufferHdr->VirtualAddress, Event->Length);
/* memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), RcvDataBufferHdr->VirtualAddress, Event->Length);*/
/* Change buffer state to UPSTREAM */
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
@@ -1481,17 +1510,18 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
}
}
#endif
/* */
/* Dumb-nic frame. See if it passes our mac filter and update stats */
/* */
/* ASK if (!sxg_mac_filter(adapter,
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length)) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length, 0);
goto drop;
} */
/*
* ASK if (!sxg_mac_filter(adapter,
* SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
* Event->Length)) {
* SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
* Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
* Event->Length, 0);
* goto drop;
* }
*/
Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
@@ -1500,9 +1530,7 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, struct sxg_ev
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
RcvDataBufferHdr, Packet, Event->Length, 0);
/* */
/* Lastly adjust the receive packet length. */
/* */
RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
@@ -1654,9 +1682,11 @@ static bool sxg_mac_filter(struct adapter_t *adapter, struct ether_header *Ether
}
}
} else if (adapter->MacFilter & MAC_DIRECTED) {
/* Not broadcast or multicast. Must be directed at us or */
/* the card is in promiscuous mode. Either way, consider it */
/* ours if MAC_DIRECTED is set */
/*
* Not broadcast or multicast. Must be directed at us or
* the card is in promiscuous mode. Either way, consider it
* ours if MAC_DIRECTED is set
*/
adapter->Stats.DumbRcvUcastPkts++;
adapter->Stats.DumbRcvUcastBytes += length;
adapter->Stats.DumbRcvPkts++;
@@ -1784,9 +1814,7 @@ static int sxg_if_init(struct adapter_t *adapter)
adapter->state = ADAPT_UP;
/*
* clear any pending events, then enable interrupts
*/
/* clear any pending events, then enable interrupts */
DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
return (STATUS_SUCCESS);
@@ -1929,7 +1957,6 @@ static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
#define NORMAL_ETHFRAME 0
/*
*
* sxg_send_packets - Send a skb packet
*
* Arguments:
@@ -1944,8 +1971,10 @@ static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
u32 status = STATUS_SUCCESS;
//DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
// skb);
/*
* DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
* skb);
*/
printk("ASK:sxg_send_packets: skb[%p]\n", skb);
/* Check the adapter state */
@@ -2016,8 +2045,10 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
void *SglBuffer;
u32 SglBufferLength;
/* The vast majority of work is done in the shared */
/* sxg_dumb_sgl routine. */
/*
* The vast majority of work is done in the shared
* sxg_dumb_sgl routine.
*/
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
adapter, skb, 0, 0);
@@ -2089,8 +2120,10 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
ASSERT(SxgSgl->VlanTag.VlanTci == 0);
ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
/* From here below we work with the SGL placed in our */
/* buffer. */
/*
* From here below we work with the SGL placed in our
* buffer.
*/
SxgSgl->Sgl.NumberOfElements = 1;
@@ -2098,8 +2131,10 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
spin_lock(&adapter->XmtZeroLock);
SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
if (XmtCmd == NULL) {
/* Call sxg_complete_slow_send to see if we can */
/* free up any XmtRingZero entries and then try again */
/*
* Call sxg_complete_slow_send to see if we can
* free up any XmtRingZero entries and then try again
*/
spin_unlock(&adapter->XmtZeroLock);
sxg_complete_slow_send(adapter);
spin_lock(&adapter->XmtZeroLock);
@@ -2128,8 +2163,10 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
adapter->Stats.DumbXmtUcastBytes += DataLength;
}
#endif
/* Fill in the command */
/* Copy out the first SGE to the command and adjust for offset */
/*
* Fill in the command
* Copy out the first SGE to the command and adjust for offset
*/
phys_addr =
pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2141,16 +2178,13 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
XmtCmd->SgEntries = 1;
XmtCmd->Flags = 0;
printk("ASK:sxg_dumb_sgl: wrote to xmit register\n");
/* */
/* Advance transmit cmd descripter by 1. */
/* NOTE - See comments in SxgTcpOutput where we write */
/* to the XmtCmd register regarding CPU ID values and/or */
/* multiple commands. */
/* */
/* */
/*
* Advance transmit cmd descriptor by 1.
* NOTE - See comments in SxgTcpOutput where we write
* to the XmtCmd register regarding CPU ID values and/or
* multiple commands.
*/
WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
/* */
/* */
adapter->Stats.XmtQLen++; /* Stats within lock */
spin_unlock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
@@ -2158,17 +2192,21 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
return;
abortcmd:
/* NOTE - Only jump to this label AFTER grabbing the */
/* XmtZeroLock, and DO NOT DROP IT between the */
/* command allocation and the following abort. */
/*
* NOTE - Only jump to this label AFTER grabbing the
* XmtZeroLock, and DO NOT DROP IT between the
* command allocation and the following abort.
*/
if (XmtCmd) {
SXG_ABORT_CMD(XmtRingInfo);
}
spin_unlock(&adapter->XmtZeroLock);
/* failsgl: */
/* Jump to this label if failure occurs before the */
/* XmtZeroLock is grabbed */
/*
* failsgl:
* Jump to this label if failure occurs before the
* XmtZeroLock is grabbed
*/
adapter->Stats.XmtErrors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
@@ -2176,11 +2214,9 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, struct sxg_scatter_gather *Sx
SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */
}
/***************************************************************
* Link management functions
***************************************************************/
/*
* Link management functions
*
* sxg_initialize_link - Initialize the link stuff
*
* Arguments -
@@ -2212,10 +2248,12 @@ static int sxg_initialize_link(struct adapter_t *adapter)
/* Reset all MAC modules */
WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
/* Link address 0 */
/* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
/* is stored with the first nibble (0a) in the byte 0 */
/* of the Mac address. Possibly reverse? */
/*
* Link address 0
* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
* is stored with the first nibble (0a) in the byte 0
* of the Mac address. Possibly reverse?
*/
Value = *(u32 *) adapter->macaddr;
WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
/* also write the MAC address to the MAC. Endian is reversed. */
@@ -2253,16 +2291,18 @@ static int sxg_initialize_link(struct adapter_t *adapter)
if (adapter->JumboEnabled) {
WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
}
/* AMIIM Configuration Register - */
/* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
/* (bottom bits) of this register is used to determine the */
/* MDC frequency as specified in the A-XGMAC Design Document. */
/* This value must not be zero. The following value (62 or 0x3E) */
/* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
/* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
/* we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. */
/* This value happens to be the default value for this register, */
/* so we really don't have to do this. */
/*
* AMIIM Configuration Register -
* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
* (bottom bits) of this register is used to determine the MDC frequency
* as specified in the A-XGMAC Design Document. This value must not be
* zero. The following value (62 or 0x3E) is based on our MAC transmit
* clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
* frequency of 2.5 MHz (see the PHY spec), we get:
* 312.5/(2*(X+1)) < 2.5 ==> X = 62.
* This value happens to be the default value for this register, so we
* really don't have to do this.
*/
WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
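/*
 * Editor's note (illustrative, not part of this patch or the driver):
 * a quick check of the arithmetic in the comment above. With
 * MTCLK = 312.5 MHz and MDC = MTCLK / (2 * (X + 1)), X = 62 gives
 * 312.5 / 126 ~= 2.48 MHz, the largest value that stays under the
 * 2.5 MHz MDIO limit; X = 61 would give ~2.52 MHz and violate it.
 */
#include <assert.h>

void check_amiim_half_clock(void)
{
	const double mtclk_mhz = 312.5;
	const double mdio_limit_mhz = 2.5;
	const unsigned int x = 62;	/* the 0x3E written to MacAmiimConfig */

	/* MDC frequency for the chosen divisor: ~2.48 MHz. */
	assert(mtclk_mhz / (2.0 * (x + 1)) < mdio_limit_mhz);
	/* The next smaller divisor would already exceed the limit. */
	assert(mtclk_mhz / (2.0 * x) > mdio_limit_mhz);
}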
/* Power up and enable PHY and XAUI/XGXS/Serdes logic */
@@ -2272,8 +2312,10 @@ static int sxg_initialize_link(struct adapter_t *adapter)
LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
/* Per information given by Aeluros, wait 100 ms after removing reset. */
/* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
/*
* Per information given by Aeluros, wait 100 ms after removing reset.
* It's not enough to wait for the self-clearing reset bit in reg 0 to clear.
*/
mdelay(100);
/* Verify the PHY has come up by checking that the Reset bit has cleared. */
@@ -2409,8 +2451,10 @@ static void sxg_link_event(struct adapter_t *adapter)
/* Check the Link Status register. We should have a Link Alarm. */
READ_REG(HwRegs->LinkStatus, Value);
if (Value & LS_LINK_ALARM) {
/* We got a Link Status alarm. First, pause to let the */
/* link state settle (it can bounce a number of times) */
/*
* We got a Link Status alarm. First, pause to let the
* link state settle (it can bounce a number of times)
*/
mdelay(10);
/* Now clear the alarm by reading the LASI status register. */
@@ -2430,11 +2474,13 @@ static void sxg_link_event(struct adapter_t *adapter)
DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
} else {
/* XXXTODO - Assuming Link Attention is only being generated for the */
/* Link Alarm pin (and not for a XAUI Link Status change), then it's */
/* impossible to get here. Yet we've gotten here twice (under extreme */
/* conditions - bouncing the link up and down many times a second). */
/* Needs further investigation. */
/*
* XXXTODO - Assuming Link Attention is only being generated
* for the Link Alarm pin (and not for a XAUI Link Status change),
* then it's impossible to get here. Yet we've gotten here
* twice (under extreme conditions - bouncing the link up and
* down many times a second). Needs further investigation.
*/
DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
/* ASSERT(0); */
@@ -2462,8 +2508,10 @@ static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
adapter, 0, 0, 0);
/* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
/* the following 3 bits (from 3 different MDIO registers) are all true. */
/*
* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
* the following 3 bits (from 3 different MDIO registers) are all true.
*/
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */
&Value);
@@ -2540,8 +2588,10 @@ static void sxg_link_state(struct adapter_t *adapter, enum SXG_LINK_STATE LinkSt
DBG_ERROR("ENTER %s\n", __func__);
/* Hold the adapter lock during this routine. Maybe move */
/* the lock to the caller. */
/*
* Hold the adapter lock during this routine. Maybe move
* the lock to the caller.
*/
spin_lock(&adapter->AdapterLock);
if (LinkState == adapter->LinkState) {
/* Nothing changed.. */
@@ -2753,7 +2803,6 @@ static int sxg_read_mdio_reg(struct adapter_t *adapter,
*
* After the CRC for the 6 bytes is generated (but before the value is complemented),
* we must then transpose the value and return bits 30-23.
*
*/
static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
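/*
 * Editor's sketch (illustrative, not part of this patch): one plausible
 * reading of the hash described in the comment above. A table-driven
 * CRC-32 is run over the 6 MAC address bytes without the final
 * complement, the 32-bit result is bit-reversed ("transposed"), and
 * bits 30-23 of the reversed value form the 8-bit hash. The polynomial
 * (0x04C11DB7, the Ethernet CRC polynomial) and the MSB-first table
 * construction are assumptions, not taken from the driver. The driver
 * later keeps only the low 6 bits of this hash (crcpoly &= 0x3F) to
 * select one of the 64 multicast mask bits.
 */
static u32 example_crc_table[256];

static void example_crc_init(void)
{
	u32 i, j, crc;

	for (i = 0; i < 256; i++) {
		crc = i << 24;
		for (j = 0; j < 8; j++)
			crc = (crc << 1) ^ ((crc & 0x80000000) ? 0x04C11DB7 : 0);
		example_crc_table[i] = crc;
	}
}

static unsigned char example_mcast_hash(const unsigned char *mac)
{
	u32 crc = 0xFFFFFFFF, reversed = 0;
	int i;

	for (i = 0; i < 6; i++)
		crc = (crc << 8) ^
		      example_crc_table[((crc >> 24) ^ mac[i]) & 0xFF];

	/* Transpose (bit-reverse) the uncomplemented CRC. */
	for (i = 0; i < 32; i++)
		reversed = (reversed << 1) | ((crc >> i) & 1);

	/* Return bits 30-23 of the transposed value. */
	return (unsigned char)((reversed >> 23) & 0xFF);
}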
@@ -2820,7 +2869,8 @@ static void sxg_mcast_set_mask(struct adapter_t *adapter)
adapter->MulticastMask);
if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
/* Turn on all multicast addresses. We have to do this for promiscuous
/*
* Turn on all multicast addresses. We have to do this for promiscuous
* mode as well as ALLMCAST mode. It saves the Microcode from having
* to keep state about the MAC configuration.
*/
@@ -2830,7 +2880,8 @@ static void sxg_mcast_set_mask(struct adapter_t *adapter)
/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
} else {
/* Commit our multicast mast to the SLIC by writing to the multicast
/*
* Commit our multicast mask to the SLIC by writing to the multicast
* address mask registers
*/
DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
@@ -2886,7 +2937,8 @@ static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
/* Get the CRC polynomial for the mac address */
crcpoly = sxg_mcast_get_mac_hash(address);
/* We only have space on the SLIC for 64 entries. Lop
/*
* We only have space on the SLIC for 64 entries. Lop
* off the top two bits. (2^6 = 64)
*/
crcpoly &= 0x3F;
@@ -2911,10 +2963,12 @@ static void sxg_mcast_set_list(struct net_device *dev)
static void sxg_unmap_mmio_space(struct adapter_t *adapter)
{
#if LINUX_FREES_ADAPTER_RESOURCES
/* if (adapter->Regs) { */
/* iounmap(adapter->Regs); */
/* } */
/* adapter->slic_regs = NULL; */
/*
* if (adapter->Regs) {
* iounmap(adapter->Regs);
* }
* adapter->slic_regs = NULL;
*/
#endif
}
@@ -2942,8 +2996,10 @@ void SxgFreeResources(struct adapter_t *adapter)
IsrCount = adapter->MsiEnabled ? RssIds : 1;
if (adapter->BasicAllocations == FALSE) {
/* No allocations have been made, including spinlocks, */
/* or listhead initializations. Return. */
/*
* No allocations have been made, including spinlocks,
* or listhead initializations. Return.
*/
return;
}
@@ -3080,14 +3136,17 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
adapter, Size, BufferType, 0);
/* Grab the adapter lock and check the state. */
/* If we're in anything other than INITIALIZING or */
/* RUNNING state, fail. This is to prevent */
/* allocations in an improper driver state */
/*
* Grab the adapter lock and check the state. If we're in anything other
* than INITIALIZING or RUNNING state, fail. This is to prevent
* allocations in an improper driver state
*/
spin_lock(&adapter->AdapterLock);
/* Increment the AllocationsPending count while holding */
/* the lock. Pause processing relies on this */
/*
* Increment the AllocationsPending count while holding
* the lock. Pause processing relies on this
*/
++adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
@@ -3095,8 +3154,10 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
if (Buffer == NULL) {
spin_lock(&adapter->AdapterLock);
/* Decrement the AllocationsPending count while holding */
/* the lock. Pause processing relies on this */
/*
* Decrement the AllocationsPending count while holding
* the lock. Pause processing relies on this
*/
--adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3121,7 +3182,6 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
* Length - Memory length
*
* Return
*
*/
static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
void *RcvBlock,
@@ -3146,10 +3206,11 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
/* First, initialize the contained pool of receive data */
/* buffers. This initialization requires NBL/NB/MDL allocations, */
/* If any of them fail, free the block and return without */
/* queueing the shared memory */
/*
* First, initialize the contained pool of receive data buffers.
* This initialization requires NBL/NB/MDL allocations; if any of them
* fail, free the block and return without queueing the shared memory
*/
RcvDataBuffer = RcvBlock;
#if 0
for (i = 0, Paddr = *PhysicalAddress;
@@ -3159,7 +3220,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
/* */
RcvDataBufferHdr =
(struct sxg_rcv_data_buffer_hdr*) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET
@@ -3170,7 +3231,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr);
//ASK hardcoded 2048
/* ASK hardcoded 2048 */
RcvDataBufferHdr->PhysicalAddress = pci_map_single(adapter->pcidev,
RcvDataBufferHdr->SxgDumbRcvPacket->data,
2048,
@@ -3180,8 +3241,10 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
}
/* Place this entire block of memory on the AllRcvBlocks queue so it can be */
/* free later */
/*
* Place this entire block of memory on the AllRcvBlocks queue so it
* can be freed later
*/
RcvBlockHdr =
(struct sxg_rcv_block_hdr*) ((unsigned char *)RcvBlock +
SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3254,7 +3317,6 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
* Length - Memory length
*
* Return
*
*/
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
struct sxg_scatter_gather *SxgSgl,
@@ -3279,10 +3341,11 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
static void sxg_adapter_set_hwaddr(struct adapter_t *adapter)
{
/* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
/* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
/* */
/* sxg_dbg_macaddrs(adapter); */
/*
* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__,
* card->config_set, adapter->port, adapter->physport, adapter->functionnumber);
* sxg_dbg_macaddrs(adapter);
*/
memcpy(adapter->macaddr, temp_mac_address, sizeof(struct sxg_config_mac));
/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
@@ -3335,18 +3398,15 @@ static int sxg_mac_set_address(struct net_device *dev, void *ptr)
}
#endif
/*****************************************************************************/
/************* SXG DRIVER FUNCTIONS (below) ********************************/
/*****************************************************************************/
/*
* SXG DRIVER FUNCTIONS (below)
*
* sxg_initialize_adapter - Initialize adapter
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* int
* Return - int
*/
static int sxg_initialize_adapter(struct adapter_t *adapter)
{
@@ -3360,8 +3420,10 @@ static int sxg_initialize_adapter(struct adapter_t *adapter)
RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
IsrCount = adapter->MsiEnabled ? RssIds : 1;
/* Sanity check SXG_UCODE_REGS structure definition to */
/* make sure the length is correct */
/*
* Sanity check SXG_UCODE_REGS structure definition to
* make sure the length is correct
*/
ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
/* Disable interrupts */
@@ -3410,11 +3472,12 @@ static int sxg_initialize_adapter(struct adapter_t *adapter)
/* Populate the card with receive buffers */
sxg_stock_rcv_buffers(adapter);
/* Initialize checksum offload capabilities. At the moment */
/* we always enable IP and TCP receive checksums on the card. */
/* Depending on the checksum configuration specified by the */
/* user, we can choose to report or ignore the checksum */
/* information provided by the card. */
/*
* Initialize checksum offload capabilities. At the moment we always
* enable IP and TCP receive checksums on the card. Depending on the
* checksum configuration specified by the user, we can choose to
* report or ignore the checksum information provided by the card.
*/
WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
......@@ -3426,8 +3489,10 @@ static int sxg_initialize_adapter(struct adapter_t *adapter)
if (status != STATUS_SUCCESS) {
return (status);
}
/* Initialize Dead to FALSE. */
/* SlicCheckForHang or SlicDumpThread will take it from here. */
/*
* Initialize Dead to FALSE.
* SlicCheckForHang or SlicDumpThread will take it from here.
*/
adapter->Dead = FALSE;
adapter->PingOutstanding = FALSE;
adapter->State = SXG_STATE_RUNNING;
@@ -3465,8 +3530,10 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
ASSERT(RcvDescriptorBlockHdr);
/* If we don't have the resources to fill the descriptor block, */
/* return failure */
/*
* If we don't have the resources to fill the descriptor block,
* return failure
*/
if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
SXG_RING_FULL(RcvRingInfo)) {
adapter->Stats.NoMem++;
@@ -3500,10 +3567,12 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
/* Add the descriptor block to receive descriptor ring 0 */
RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
/* RcvBuffersOnCard is not protected via the receive lock (see */
/* sxg_process_event_queue) We don't want to grap a lock every time a */
/* buffer is returned to us, so we use atomic interlocked functions */
/* instead. */
/*
* RcvBuffersOnCard is not protected via the receive lock (see
* sxg_process_event_queue). We don't want to grab a lock every time a
* buffer is returned to us, so we use atomic interlocked functions
* instead.
*/
adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3533,10 +3602,12 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
/* First, see if we've got less than our minimum threshold of */
/* receive buffers, there isn't an allocation in progress, and */
/* we haven't exceeded our maximum.. get another block of buffers */
/* None of this needs to be SMP safe. It's round numbers. */
/*
* First, see if we've got less than our minimum threshold of
* receive buffers, there isn't an allocation in progress, and
* we haven't exceeded our maximum; if so, get another block of buffers.
* None of this needs to be SMP safe. It's round numbers.
*/
if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
(adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
(adapter->AllocationsPending == 0)) {
@@ -3608,11 +3679,11 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
spin_lock(&adapter->RcvQLock);
ASSERT(Index != RcvRingInfo->Tail);
while (RcvRingInfo->Tail != Index) {
/* */
/* Locate the current Cmd (ring descriptor entry), and */
/* associated receive descriptor block, and advance */
/* the tail */
/* */
/*
* Locate the current Cmd (ring descriptor entry), and
* associated receive descriptor block, and advance
* the tail
*/
SXG_RETURN_CMD(RingZero,
RcvRingInfo,
RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3622,10 +3693,12 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
/* Clear the SGL field */
RingDescriptorCmd->Sgl = 0;
/* Attempt to refill it and hand it right back to the */
/* card. If we fail to refill it, free the descriptor block */
/* header. The card will be restocked later via the */
/* RcvBuffersOnCard test */
/*
* Attempt to refill it and hand it right back to the
* card. If we fail to refill it, free the descriptor block
* header. The card will be restocked later via the
* RcvBuffersOnCard test
*/
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
@@ -43,78 +43,78 @@
#define __SXG_DRIVER_H__
#define p_net_device struct net_device *
// struct sxg_stats - Probably move these to someplace where
// the slicstat (sxgstat?) program can get them.
/*
* struct sxg_stats - Probably move these to someplace where
* the slicstat (sxgstat?) program can get them.
*/
struct sxg_stats {
// Xmt
u32 XmtNBL; // Offload send NBL count
u64 DumbXmtBytes; // Dumbnic send bytes
u64 SlowXmtBytes; // Slowpath send bytes
u64 FastXmtBytes; // Fastpath send bytes
u64 DumbXmtPkts; // Dumbnic send packets
u64 SlowXmtPkts; // Slowpath send packets
u64 FastXmtPkts; // Fastpath send packets
u64 DumbXmtUcastPkts; // directed packets
u64 DumbXmtMcastPkts; // Multicast packets
u64 DumbXmtBcastPkts; // OID_GEN_BROADCAST_FRAMES_RCV
u64 DumbXmtUcastBytes; // OID_GEN_DIRECTED_BYTES_XMIT
u64 DumbXmtMcastBytes; // OID_GEN_MULTICAST_BYTES_XMIT
u64 DumbXmtBcastBytes; // OID_GEN_BROADCAST_BYTES_XMIT
u64 XmtErrors; // OID_GEN_XMIT_ERROR
u64 XmtDiscards; // OID_GEN_XMIT_DISCARDS
u64 XmtOk; // OID_GEN_XMIT_OK
u64 XmtQLen; // OID_GEN_TRANSMIT_QUEUE_LENGTH
u64 XmtZeroFull; // Transmit ring zero full
// Rcv
u32 RcvNBL; // Offload recieve NBL count
u64 DumbRcvBytes; // dumbnic recv bytes
u64 DumbRcvUcastBytes; // OID_GEN_DIRECTED_BYTES_RCV
u64 DumbRcvMcastBytes; // OID_GEN_MULTICAST_BYTES_RCV
u64 DumbRcvBcastBytes; // OID_GEN_BROADCAST_BYTES_RCV
u64 SlowRcvBytes; // Slowpath recv bytes
u64 FastRcvBytes; // Fastpath recv bytes
u64 DumbRcvPkts; // OID_GEN_DIRECTED_FRAMES_RCV
u64 DumbRcvTcpPkts; // See SxgCollectStats
u64 DumbRcvUcastPkts; // directed packets
u64 DumbRcvMcastPkts; // Multicast packets
u64 DumbRcvBcastPkts; // OID_GEN_BROADCAST_FRAMES_RCV
u64 SlowRcvPkts; // OID_GEN_DIRECTED_FRAMES_RCV
u64 RcvErrors; // OID_GEN_RCV_ERROR
u64 RcvDiscards; // OID_GEN_RCV_DISCARDS
u64 RcvNoBuffer; // OID_GEN_RCV_NO_BUFFER
u64 PdqFull; // Processed Data Queue Full
u64 EventRingFull; // Event ring full
// Verbose stats
u64 MaxSends; // Max sends outstanding
u64 NoSglBuf; // SGL buffer allocation failure
u64 SglFail; // NDIS SGL failure
u64 SglAsync; // NDIS SGL failure
u64 NoMem; // Memory allocation failure
u64 NumInts; // Interrupts
u64 FalseInts; // Interrupt with ISR == 0
u64 XmtDrops; // No sahara DRAM buffer for xmt
// Sahara receive status
u64 TransportCsum; // SXG_RCV_STATUS_TRANSPORT_CSUM
u64 TransportUflow; // SXG_RCV_STATUS_TRANSPORT_UFLOW
u64 TransportHdrLen; // SXG_RCV_STATUS_TRANSPORT_HDRLEN
u64 NetworkCsum; // SXG_RCV_STATUS_NETWORK_CSUM:
u64 NetworkUflow; // SXG_RCV_STATUS_NETWORK_UFLOW:
u64 NetworkHdrLen; // SXG_RCV_STATUS_NETWORK_HDRLEN:
u64 Parity; // SXG_RCV_STATUS_PARITY
u64 LinkParity; // SXG_RCV_STATUS_LINK_PARITY:
u64 LinkEarly; // SXG_RCV_STATUS_LINK_EARLY:
u64 LinkBufOflow; // SXG_RCV_STATUS_LINK_BUFOFLOW:
u64 LinkCode; // SXG_RCV_STATUS_LINK_CODE:
u64 LinkDribble; // SXG_RCV_STATUS_LINK_DRIBBLE:
u64 LinkCrc; // SXG_RCV_STATUS_LINK_CRC:
u64 LinkOflow; // SXG_RCV_STATUS_LINK_OFLOW:
u64 LinkUflow; // SXG_RCV_STATUS_LINK_UFLOW:
/* Xmt */
u32 XmtNBL; /* Offload send NBL count */
u64 DumbXmtBytes; /* Dumbnic send bytes */
u64 SlowXmtBytes; /* Slowpath send bytes */
u64 FastXmtBytes; /* Fastpath send bytes */
u64 DumbXmtPkts; /* Dumbnic send packets */
u64 SlowXmtPkts; /* Slowpath send packets */
u64 FastXmtPkts; /* Fastpath send packets */
u64 DumbXmtUcastPkts; /* directed packets */
u64 DumbXmtMcastPkts; /* Multicast packets */
u64 DumbXmtBcastPkts; /* OID_GEN_BROADCAST_FRAMES_RCV */
u64 DumbXmtUcastBytes; /* OID_GEN_DIRECTED_BYTES_XMIT */
u64 DumbXmtMcastBytes; /* OID_GEN_MULTICAST_BYTES_XMIT */
u64 DumbXmtBcastBytes; /* OID_GEN_BROADCAST_BYTES_XMIT */
u64 XmtErrors; /* OID_GEN_XMIT_ERROR */
u64 XmtDiscards; /* OID_GEN_XMIT_DISCARDS */
u64 XmtOk; /* OID_GEN_XMIT_OK */
u64 XmtQLen; /* OID_GEN_TRANSMIT_QUEUE_LENGTH */
u64 XmtZeroFull; /* Transmit ring zero full */
/* Rcv */
u32 RcvNBL; /* Offload receive NBL count */
u64 DumbRcvBytes; /* dumbnic recv bytes */
u64 DumbRcvUcastBytes; /* OID_GEN_DIRECTED_BYTES_RCV */
u64 DumbRcvMcastBytes; /* OID_GEN_MULTICAST_BYTES_RCV */
u64 DumbRcvBcastBytes; /* OID_GEN_BROADCAST_BYTES_RCV */
u64 SlowRcvBytes; /* Slowpath recv bytes */
u64 FastRcvBytes; /* Fastpath recv bytes */
u64 DumbRcvPkts; /* OID_GEN_DIRECTED_FRAMES_RCV */
u64 DumbRcvTcpPkts; /* See SxgCollectStats */
u64 DumbRcvUcastPkts; /* directed packets */
u64 DumbRcvMcastPkts; /* Multicast packets */
u64 DumbRcvBcastPkts; /* OID_GEN_BROADCAST_FRAMES_RCV */
u64 SlowRcvPkts; /* OID_GEN_DIRECTED_FRAMES_RCV */
u64 RcvErrors; /* OID_GEN_RCV_ERROR */
u64 RcvDiscards; /* OID_GEN_RCV_DISCARDS */
u64 RcvNoBuffer; /* OID_GEN_RCV_NO_BUFFER */
u64 PdqFull; /* Processed Data Queue Full */
u64 EventRingFull; /* Event ring full */
/* Verbose stats */
u64 MaxSends; /* Max sends outstanding */
u64 NoSglBuf; /* SGL buffer allocation failure */
u64 SglFail; /* NDIS SGL failure */
u64 SglAsync; /* NDIS SGL failure */
u64 NoMem; /* Memory allocation failure */
u64 NumInts; /* Interrupts */
u64 FalseInts; /* Interrupt with ISR == 0 */
u64 XmtDrops; /* No sahara DRAM buffer for xmt */
/* Sahara receive status */
u64 TransportCsum; /* SXG_RCV_STATUS_TRANSPORT_CSUM */
u64 TransportUflow; /* SXG_RCV_STATUS_TRANSPORT_UFLOW */
u64 TransportHdrLen; /* SXG_RCV_STATUS_TRANSPORT_HDRLEN */
u64 NetworkCsum; /* SXG_RCV_STATUS_NETWORK_CSUM: */
u64 NetworkUflow; /* SXG_RCV_STATUS_NETWORK_UFLOW: */
u64 NetworkHdrLen; /* SXG_RCV_STATUS_NETWORK_HDRLEN: */
u64 Parity; /* SXG_RCV_STATUS_PARITY */
u64 LinkParity; /* SXG_RCV_STATUS_LINK_PARITY: */
u64 LinkEarly; /* SXG_RCV_STATUS_LINK_EARLY: */
u64 LinkBufOflow; /* SXG_RCV_STATUS_LINK_BUFOFLOW: */
u64 LinkCode; /* SXG_RCV_STATUS_LINK_CODE: */
u64 LinkDribble; /* SXG_RCV_STATUS_LINK_DRIBBLE: */
u64 LinkCrc; /* SXG_RCV_STATUS_LINK_CRC: */
u64 LinkOflow; /* SXG_RCV_STATUS_LINK_OFLOW: */
u64 LinkUflow; /* SXG_RCV_STATUS_LINK_UFLOW: */
};
/****************************************************************************
* DUMB-NIC Send path definitions
****************************************************************************/
/* DUMB-NIC Send path definitions */
#define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb) { \
ASSERT(_skb); \
@@ -126,17 +126,17 @@ struct sxg_stats {
dev_kfree_skb(_skb); \
}
// Locate current receive header buffer location. Use this
// instead of RcvDataHdr->VirtualAddress since the data
// may have been offset by SXG_ADVANCE_MDL_OFFSET
/*
* Locate current receive header buffer location. Use this
* instead of RcvDataHdr->VirtualAddress since the data
* may have been offset by SXG_ADVANCE_MDL_OFFSET
*/
#define SXG_RECEIVE_DATA_LOCATION(_RcvDataHdr) (_RcvDataHdr)->skb->data
/************************************************************************
* Dumb-NIC receive processing
************************************************************************/
// Define an SXG_PACKET as an NDIS_PACKET
/* Dumb-NIC receive processing */
/* Define an SXG_PACKET as an NDIS_PACKET */
#define PSXG_PACKET struct sk_buff *
// Indications array size
/* Indications array size */
#define SXG_RCV_ARRAYSIZE 64
#define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr) { \
@@ -156,9 +156,11 @@ struct sxg_stats {
} \
}
// Macro to add a NDIS_PACKET to an indication array
// If we fill up our array of packet pointers, then indicate this
// block up now and start on a new one.
/*
* Macro to add a NDIS_PACKET to an indication array
* If we fill up our array of packet pointers, then indicate this
* block up now and start on a new one.
*/
#define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, _NumPackets) { \
(_IndicationList)[_NumPackets] = (_Packet); \
(_NumPackets)++; \
@@ -182,7 +184,7 @@ struct sxg_stats {
#define SXG_REINIATIALIZE_PACKET(_Packet) \
{} /*_NdisReinitializePacket(_Packet)*/ /* this is not necessary with an skb */
// Definitions to initialize Dumb-nic Receive NBLs
/* Definitions to initialize Dumb-nic Receive NBLs */
#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)((_Packet)->MiniportReservedEx))->RcvDataBufferHdr)
#define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \
@@ -210,10 +212,10 @@ struct sxg_stats {
skb_put(Packet, (_Event)->Length); \
}
///////////////////////////////////////////////////////////////////////////////
// Macros to free a receive data buffer and receive data descriptor block
///////////////////////////////////////////////////////////////////////////////
// NOTE - Lock must be held with RCV macros
/*
* Macros to free a receive data buffer and receive data descriptor block
* NOTE - Lock must be held with RCV macros
*/
#define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \
struct list_entry *_ple; \
_Hdr = NULL; \
@@ -246,7 +248,7 @@ struct sxg_stats {
InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \
}
// SGL macros
/* SGL macros */
#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \
spin_lock(&(_pAdapt)->SglQLock); \
(_pAdapt)->FreeSglBufferCount++; \
@@ -257,11 +259,13 @@ struct sxg_stats {
spin_unlock(&(_pAdapt)->SglQLock); \
}
// Get an SGL buffer from the free queue. The first part of this macro
// attempts to keep ahead of buffer depletion by allocating more when
// we hit a minimum threshold. Note that we don't grab the lock
// until after that. We're dealing with round numbers here, so we don't need to,
// and not grabbing it avoids a possible double-trip.
/*
* Get an SGL buffer from the free queue. The first part of this macro
* attempts to keep ahead of buffer depletion by allocating more when
* we hit a minimum threshold. Note that we don't grab the lock
* until after that. We're dealing with round numbers here, so we don't need to,
* and not grabbing it avoids a possible double-trip.
*/
#define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \
struct list_entry *_ple; \
if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \
......@@ -285,28 +289,30 @@ struct sxg_stats {
spin_unlock(&(_pAdapt)->SglQLock); \
}
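/*
 * A minimal sketch (not taken from the driver) of the same pattern written
 * as a function: test the counters without the lock, then take SglQLock only
 * for the actual dequeue.  sxg_replenish_sgl_pool() is a hypothetical helper,
 * and RemoveHeadList() is assumed to exist alongside RemoveTailList().
 */
static struct sxg_scatter_gather *sxg_get_sgl_sketch(struct adapter_t *adapter)
{
	struct sxg_scatter_gather *sgl = NULL;
	struct list_entry *ple;

	/* Unlocked threshold test - an approximate count is good enough */
	if ((adapter->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) &&
	    (adapter->AllSglBufferCount < SXG_MAX_SGL_BUFFERS))
		sxg_replenish_sgl_pool(adapter);	/* hypothetical helper */

	spin_lock(&adapter->SglQLock);
	if (adapter->FreeSglBufferCount) {
		adapter->FreeSglBufferCount--;
		ple = RemoveHeadList(&adapter->FreeSglBuffers);
		sgl = container_of(ple, struct sxg_scatter_gather, FreeList);
	}
	spin_unlock(&adapter->SglQLock);
	return sgl;
}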
//
// struct sxg_multicast_address
//
// Linked list of multicast addresses.
/*
* struct sxg_multicast_address
* Linked list of multicast addresses.
*/
struct sxg_multicast_address {
unsigned char Address[6];
struct sxg_multicast_address *Next;
};
// Structure to maintain chimney send and receive buffer queues.
// This structure maintains NET_BUFFER_LIST queues that are
// given to us via the Chimney MiniportTcpOffloadSend and
// MiniportTcpOffloadReceive routines. This structure DOES NOT
// manage our data buffer queue
/*
* Structure to maintain chimney send and receive buffer queues.
* This structure maintains NET_BUFFER_LIST queues that are
* given to us via the Chimney MiniportTcpOffloadSend and
* MiniportTcpOffloadReceive routines. This structure DOES NOT
* manage our data buffer queue
*/
struct sxg_buffer_queue {
u32 Type; // Slow or fast - See below
u32 Direction; // Xmt or Rcv
u32 Bytes; // Byte count
u32 * Head; // Send queue head
u32 * Tail; // Send queue tail
// PNET_BUFFER_LIST NextNBL; // Short cut - next NBL
// PNET_BUFFER NextNB; // Short cut - next NB
u32 Type; /* Slow or fast - See below */
u32 Direction; /* Xmt or Rcv */
u32 Bytes; /* Byte count */
u32 * Head; /* Send queue head */
u32 * Tail; /* Send queue tail */
/* PNET_BUFFER_LIST NextNBL;*/ /* Short cut - next NBL */
/* PNET_BUFFER NextNB; */ /* Short cut - next NB */
};
#define SXG_SLOW_SEND_BUFFER 0
......@@ -329,64 +335,66 @@ struct sxg_buffer_queue {
#define SXG_RSS_CPU_COUNT(_pAdapt) \
((_pAdapt)->RssEnabled ? NR_CPUS : 1)
/****************************************************************************
* DRIVER and ADAPTER structures
****************************************************************************/
/* DRIVER and ADAPTER structures */
// Adapter states - These states closely match the adapter states
// documented in the DDK (with a few exceptions).
/*
* Adapter states - These states closely match the adapter states
* documented in the DDK (with a few exceptions).
*/
enum SXG_STATE {
SXG_STATE_INITIALIZING, // Initializing
SXG_STATE_BOOTDIAG, // Boot-Diagnostic mode
SXG_STATE_PAUSING, // Pausing
SXG_STATE_PAUSED, // Paused
SXG_STATE_RUNNING, // Running
SXG_STATE_RESETTING, // Reset in progress
SXG_STATE_SLEEP, // Sleeping
SXG_STATE_DIAG, // Diagnostic mode
SXG_STATE_HALTING, // Halting
SXG_STATE_HALTED, // Down or not-initialized
SXG_STATE_SHUTDOWN // shutdown
SXG_STATE_INITIALIZING, /* Initializing */
SXG_STATE_BOOTDIAG, /* Boot-Diagnostic mode */
SXG_STATE_PAUSING, /* Pausing */
SXG_STATE_PAUSED, /* Paused */
SXG_STATE_RUNNING, /* Running */
SXG_STATE_RESETTING, /* Reset in progress */
SXG_STATE_SLEEP, /* Sleeping */
SXG_STATE_DIAG, /* Diagnostic mode */
SXG_STATE_HALTING, /* Halting */
SXG_STATE_HALTED, /* Down or not-initialized */
SXG_STATE_SHUTDOWN /* shutdown */
};
// Link state
/* Link state */
enum SXG_LINK_STATE {
SXG_LINK_DOWN,
SXG_LINK_UP
};
// Link initialization timeout in 100us units
#define SXG_LINK_TIMEOUT 100000 // 10 Seconds - REDUCE!
/* Link initialization timeout in 100us units */
#define SXG_LINK_TIMEOUT 100000 /* 10 Seconds - REDUCE! */
// Microcode file selection codes
/* Microcode file selection codes */
enum SXG_UCODE_SEL {
SXG_UCODE_SAHARA, // Sahara ucode
SXG_UCODE_SDIAGCPU, // Sahara CPU diagnostic ucode
SXG_UCODE_SDIAGSYS // Sahara system diagnostic ucode
SXG_UCODE_SAHARA, /* Sahara ucode */
SXG_UCODE_SDIAGCPU, /* Sahara CPU diagnostic ucode */
SXG_UCODE_SDIAGSYS /* Sahara system diagnostic ucode */
};
#define SXG_DISABLE_ALL_INTERRUPTS(_padapt) sxg_disable_interrupt(_padapt)
#define SXG_ENABLE_ALL_INTERRUPTS(_padapt) sxg_enable_interrupt(_padapt)
// This probably lives in a proto.h file. Move later
/* This probably lives in a proto.h file. Move later */
#define SXG_MULTICAST_PACKET(_pether) ((_pether)->ether_dhost[0] & 0x01)
#define SXG_BROADCAST_PACKET(_pether) ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \
(*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF))
// For DbgPrints
/* For DbgPrints */
#define SXG_ID DPFLTR_IHVNETWORK_ID
#define SXG_ERROR DPFLTR_ERROR_LEVEL
//
// struct sxg_driver structure -
//
// contains information about the sxg driver. There is only
// one of these, and it is defined as a global.
/*
* struct sxg_driver structure -
*
* contains information about the sxg driver. There is only
* one of these, and it is defined as a global.
*/
struct sxg_driver {
struct adapter_t *Adapters; // Linked list of adapters
ushort AdapterID; // Maintain unique adapter ID
struct adapter_t *Adapters; /* Linked list of adapters */
ushort AdapterID; /* Maintain unique adapter ID */
};
#ifdef STATUS_SUCCESS
......@@ -404,12 +412,14 @@ struct sxg_driver {
#define SLIC_MAX_CARDS 32
#define SLIC_MAX_PORTS 4 /* Max # of ports per card */
#if SLIC_DUMP_ENABLED
// Dump buffer size
//
// This cannot be bigger than the max DMA size the card supports,
// given the current code structure in the host and ucode.
// Mojave supports 16K, Oasis supports 16K-1, so
// just set this at 15K, shouldnt make that much of a diff.
/*
* Dump buffer size
* This cannot be bigger than the max DMA size the card supports,
* given the current code structure in the host and ucode.
* Mojave supports 16K, Oasis supports 16K-1, so
 * just set this at 15K, shouldn't make that much of a difference.
*/
#define DUMP_BUF_SIZE 0x3C00
#endif
......@@ -560,123 +570,123 @@ struct adapter_t {
u32 rcv_interrupt_yields;
u32 intagg_period;
struct net_device_stats stats;
u32 * MiniportHandle; // Our miniport handle
enum SXG_STATE State; // Adapter state
enum SXG_LINK_STATE LinkState; // Link state
u64 LinkSpeed; // Link Speed
u32 PowerState; // NDIS power state
struct adapter_t *Next; // Linked list
ushort AdapterID; // 1..n
u32 * MiniportHandle; /* Our miniport handle */
enum SXG_STATE State; /* Adapter state */
enum SXG_LINK_STATE LinkState; /* Link state */
u64 LinkSpeed; /* Link Speed */
u32 PowerState; /* NDIS power state */
struct adapter_t *Next; /* Linked list */
ushort AdapterID; /* 1..n */
struct net_device * netdev;
struct net_device * next_netdevice;
struct pci_dev * pcidev;
struct sxg_multicast_address *MulticastAddrs; // Multicast list
u64 MulticastMask; // Multicast mask
u32 * InterruptHandle; // Register Interrupt handle
u32 InterruptLevel; // From Resource list
u32 InterruptVector; // From Resource list
spinlock_t AdapterLock; /* Serialize access adapter routines */
spinlock_t Bit64RegLock; /* For writing 64-bit addresses */
struct sxg_hw_regs *HwRegs; // Sahara HW Register Memory (BAR0/1)
struct sxg_ucode_regs *UcodeRegs; // Microcode Register Memory (BAR2/3)
struct sxg_tcb_regs *TcbRegs; // Same as Ucode regs - See sxghw.h
ushort FrameSize; // Maximum frame size
u32 * DmaHandle; // NDIS DMA handle
u32 * PacketPoolHandle; // Used with NDIS 5.2 only. Don't ifdef out
u32 * BufferPoolHandle; // Used with NDIS 5.2 only. Don't ifdef out
u32 MacFilter; // NDIS MAC Filter
struct sxg_event_ring *EventRings; // Host event rings. 1/CPU to 16 max
dma_addr_t PEventRings; // Physical address
u32 NextEvent[SXG_MAX_RSS]; // Current location in ring
dma_addr_t PTcbBuffers; // TCB Buffers - physical address
dma_addr_t PTcbCompBuffers; // TCB Composite Buffers - phys addr
struct sxg_xmt_ring *XmtRings; // Transmit rings
dma_addr_t PXmtRings; // Transmit rings - physical address
struct sxg_ring_info XmtRingZeroInfo; // Transmit ring 0 info
struct pci_dev *pcidev;
struct sxg_multicast_address *MulticastAddrs; /* Multicast list */
u64 MulticastMask; /* Multicast mask */
u32 *InterruptHandle; /* Register Interrupt handle */
u32 InterruptLevel; /* From Resource list */
u32 InterruptVector; /* From Resource list */
spinlock_t AdapterLock; /* Serialize access adapter routines */
spinlock_t Bit64RegLock; /* For writing 64-bit addresses */
struct sxg_hw_regs *HwRegs; /* Sahara HW Register Memory (BAR0/1) */
struct sxg_ucode_regs *UcodeRegs; /* Microcode Register Memory (BAR2/3) */
struct sxg_tcb_regs *TcbRegs; /* Same as Ucode regs - See sxghw.h */
ushort FrameSize; /* Maximum frame size */
u32 * DmaHandle; /* NDIS DMA handle */
u32 * PacketPoolHandle; /* Used with NDIS 5.2 only. Don't ifdef out */
u32 * BufferPoolHandle; /* Used with NDIS 5.2 only. Don't ifdef out */
u32 MacFilter; /* NDIS MAC Filter */
struct sxg_event_ring *EventRings; /* Host event rings. 1/CPU to 16 max */
dma_addr_t PEventRings; /* Physical address */
u32 NextEvent[SXG_MAX_RSS]; /* Current location in ring */
dma_addr_t PTcbBuffers; /* TCB Buffers - physical address */
dma_addr_t PTcbCompBuffers; /* TCB Composite Buffers - phys addr */
struct sxg_xmt_ring *XmtRings; /* Transmit rings */
dma_addr_t PXmtRings; /* Transmit rings - physical address */
struct sxg_ring_info XmtRingZeroInfo; /* Transmit ring 0 info */
spinlock_t XmtZeroLock; /* Transmit ring 0 lock */
u32 * XmtRingZeroIndex; // Shared XMT ring 0 index
dma_addr_t PXmtRingZeroIndex; // Shared XMT ring 0 index - physical
struct list_entry FreeProtocolHeaders;// Free protocol headers
u32 FreeProtoHdrCount; // Count
void * ProtocolHeaders; // Block of protocol header
dma_addr_t PProtocolHeaders; // Block of protocol headers - phys
struct sxg_rcv_ring *RcvRings; // Receive rings
dma_addr_t PRcvRings; // Receive rings - physical address
struct sxg_ring_info RcvRingZeroInfo; // Receive ring 0 info
u32 * Isr; // Interrupt status register
dma_addr_t PIsr; // ISR - physical address
u32 IsrCopy[SXG_MAX_RSS]; // Copy of ISR
ushort InterruptsEnabled; // Bitmask of enabled vectors
unsigned char * IndirectionTable; // RSS indirection table
dma_addr_t PIndirectionTable; // Physical address
ushort RssTableSize; // From NDIS_RECEIVE_SCALE_PARAMETERS
ushort HashKeySize; // From NDIS_RECEIVE_SCALE_PARAMETERS
unsigned char HashSecretKey[40]; // rss key
u32 HashInformation;
// Receive buffer queues
spinlock_t RcvQLock; /* Receive Queue Lock */
struct list_entry FreeRcvBuffers; // Free SXG_DATA_BUFFER queue
struct list_entry FreeRcvBlocks; // Free SXG_RCV_DESCRIPTOR_BLOCK Q
struct list_entry AllRcvBlocks; // All SXG_RCV_BLOCKs
ushort FreeRcvBufferCount; // Number of free rcv data buffers
ushort FreeRcvBlockCount; // # of free rcv descriptor blocks
ushort AllRcvBlockCount; // Number of total receive blocks
ushort ReceiveBufferSize; // SXG_RCV_DATA/JUMBO_BUFFER_SIZE only
u32 AllocationsPending; // Receive allocation pending
u32 RcvBuffersOnCard; // SXG_DATA_BUFFERS owned by card
// SGL buffers
u32 * XmtRingZeroIndex; /* Shared XMT ring 0 index */
dma_addr_t PXmtRingZeroIndex; /* Shared XMT ring 0 index - physical */
struct list_entry FreeProtocolHeaders;/* Free protocol headers */
u32 FreeProtoHdrCount; /* Count */
void * ProtocolHeaders; /* Block of protocol header */
dma_addr_t PProtocolHeaders; /* Block of protocol headers - phys */
struct sxg_rcv_ring *RcvRings; /* Receive rings */
dma_addr_t PRcvRings; /* Receive rings - physical address */
struct sxg_ring_info RcvRingZeroInfo; /* Receive ring 0 info */
u32 * Isr; /* Interrupt status register */
dma_addr_t PIsr; /* ISR - physical address */
u32 IsrCopy[SXG_MAX_RSS]; /* Copy of ISR */
ushort InterruptsEnabled; /* Bitmask of enabled vectors */
unsigned char *IndirectionTable; /* RSS indirection table */
dma_addr_t PIndirectionTable; /* Physical address */
ushort RssTableSize; /* From NDIS_RECEIVE_SCALE_PARAMETERS */
ushort HashKeySize; /* From NDIS_RECEIVE_SCALE_PARAMETERS */
unsigned char HashSecretKey[40]; /* rss key */
u32 HashInformation;
/* Receive buffer queues */
spinlock_t RcvQLock; /* Receive Queue Lock */
struct list_entry FreeRcvBuffers; /* Free SXG_DATA_BUFFER queue */
struct list_entry FreeRcvBlocks; /* Free SXG_RCV_DESCRIPTOR_BLOCK Q */
struct list_entry AllRcvBlocks; /* All SXG_RCV_BLOCKs */
ushort FreeRcvBufferCount; /* Number of free rcv data buffers */
ushort FreeRcvBlockCount; /* # of free rcv descriptor blocks */
ushort AllRcvBlockCount; /* Number of total receive blocks */
ushort ReceiveBufferSize; /* SXG_RCV_DATA/JUMBO_BUFFER_SIZE only */
u32 AllocationsPending; /* Receive allocation pending */
u32 RcvBuffersOnCard; /* SXG_DATA_BUFFERS owned by card */
/* SGL buffers */
spinlock_t SglQLock; /* SGL Queue Lock */
struct list_entry FreeSglBuffers; // Free SXG_SCATTER_GATHER
struct list_entry AllSglBuffers; // All SXG_SCATTER_GATHER
ushort FreeSglBufferCount; // Number of free SGL buffers
ushort AllSglBufferCount; // Number of total SGL buffers
u32 CurrentTime; // Tick count
u32 FastpathConnections;// # of fastpath connections
// Various single-bit flags:
u32 BasicAllocations:1; // Locks and listheads
u32 IntRegistered:1; // Interrupt registered
u32 PingOutstanding:1; // Ping outstanding to card
u32 Dead:1; // Card dead
u32 DumpDriver:1; // OID_SLIC_DRIVER_DUMP request
u32 DumpCard:1; // OID_SLIC_CARD_DUMP request
u32 DumpCmdRunning:1; // Dump command in progress
u32 DebugRunning:1; // AGDB debug in progress
u32 JumboEnabled:1; // Jumbo frames enabled
u32 MsiEnabled:1; // MSI interrupt enabled
u32 RssEnabled:1; // RSS Enabled
u32 FailOnBadEeprom:1; // Fail on Bad Eeprom
u32 DiagStart:1; // Init adapter for diagnostic start
// Stats
u32 PendingRcvCount; // Outstanding rcv indications
u32 PendingXmtCount; // Outstanding send requests
struct sxg_stats Stats; // Statistics
u32 ReassBufs; // Number of reassembly buffers
// Card Crash Info
ushort CrashLocation; // Microcode crash location
unsigned char CrashCpu; // Sahara CPU ID
// Diagnostics
// PDIAG_CMD DiagCmds; // List of free diagnostic commands
// PDIAG_BUFFER DiagBuffers; // List of free diagnostic buffers
// PDIAG_REQ DiagReqQ; // List of outstanding (asynchronous) diag requests
// u32 DiagCmdTimeout; // Time out for diag cmds (seconds) XXXTODO - replace with SXG_PARAM var?
// unsigned char DiagDmaDesc[DMA_CPU_CTXS]; // Free DMA descriptors bit field (32 CPU ctx * 8 DMA ctx)
/////////////////////////////////////////////////////////////////////
// Put preprocessor-conditional fields at the end so we don't
// have to recompile sxgdbg everytime we reconfigure the driver
/////////////////////////////////////////////////////////////////////
struct list_entry FreeSglBuffers; /* Free struct sxg_scatter_gather */
struct list_entry AllSglBuffers; /* All struct sxg_scatter_gather */
ushort FreeSglBufferCount; /* Number of free SGL buffers */
ushort AllSglBufferCount; /* Number of total SGL buffers */
u32 CurrentTime; /* Tick count */
u32 FastpathConnections;/* # of fastpath connections */
/* Various single-bit flags: */
u32 BasicAllocations:1; /* Locks and listheads */
u32 IntRegistered:1; /* Interrupt registered */
u32 PingOutstanding:1; /* Ping outstanding to card */
u32 Dead:1; /* Card dead */
u32 DumpDriver:1; /* OID_SLIC_DRIVER_DUMP request */
u32 DumpCard:1; /* OID_SLIC_CARD_DUMP request */
u32 DumpCmdRunning:1; /* Dump command in progress */
u32 DebugRunning:1; /* AGDB debug in progress */
u32 JumboEnabled:1; /* Jumbo frames enabled */
u32 MsiEnabled:1; /* MSI interrupt enabled */
u32 RssEnabled:1; /* RSS Enabled */
u32 FailOnBadEeprom:1; /* Fail on Bad Eeprom */
u32 DiagStart:1; /* Init adapter for diagnostic start */
/* Stats */
u32 PendingRcvCount; /* Outstanding rcv indications */
u32 PendingXmtCount; /* Outstanding send requests */
struct sxg_stats Stats; /* Statistics */
u32 ReassBufs; /* Number of reassembly buffers */
/* Card Crash Info */
ushort CrashLocation; /* Microcode crash location */
unsigned char CrashCpu; /* Sahara CPU ID */
/* Diagnostics */
/* PDIAG_CMD DiagCmds; */ /* List of free diagnostic commands */
/* PDIAG_BUFFER DiagBuffers; */ /* List of free diagnostic buffers */
/* PDIAG_REQ DiagReqQ; */ /* List of outstanding (asynchronous) diag requests */
/* u32 DiagCmdTimeout; */ /* Time out for diag cmds (seconds) XXXTODO - replace with SXG_PARAM var? */
/* unsigned char DiagDmaDesc[DMA_CPU_CTXS]; */ /* Free DMA descriptors bit field (32 CPU ctx * 8 DMA ctx) */
/*
* Put preprocessor-conditional fields at the end so we don't
 * have to recompile sxgdbg every time we reconfigure the driver
*/
#if defined(CONFIG_X86)
u32 AddrUpper; // Upper 32 bits of 64-bit register
u32 AddrUpper; /* Upper 32 bits of 64-bit register */
#endif
//#if SXG_FAILURE_DUMP
// NDIS_EVENT DumpThreadEvent; // syncronize dump thread
// BOOLEAN DumpThreadRunning; // termination flag
// PSXG_DUMP_CMD DumpBuffer; // 68k - Cmd and Buffer
// dma_addr_t PDumpBuffer; // Physical address
//#endif // SXG_FAILURE_DUMP
/*#if SXG_FAILURE_DUMP */
/* NDIS_EVENT DumpThreadEvent; */ /* synchronize dump thread */
/* BOOLEAN DumpThreadRunning; */ /* termination flag */
/* PSXG_DUMP_CMD DumpBuffer; */ /* 68k - Cmd and Buffer */
/* dma_addr_t PDumpBuffer; */ /* Physical address */
/*#endif */ /* SXG_FAILURE_DUMP */
};
......@@ -685,12 +695,10 @@ struct adapter_t {
#define SLIC_DUMP_IN_PROGRESS 2
#define SLIC_DUMP_DONE 3
/****************************************************************************
*
/*
* Microcode crash information structure. This
* structure is written out to the card's SRAM when the microcode panic's.
*
****************************************************************************/
*/
struct slic_crash_info {
ushort cpu_id;
ushort crash_pc;
......
......@@ -128,7 +128,7 @@ static __inline struct list_entry *RemoveTailList(struct list_entry *l)
#define SLIC_TIMESTAMP(value)
#endif
/****************** SXG DEFINES *****************************************/
/* SXG DEFINES */
#ifdef ATKDBG
#define SXG_TIMESTAMP(value) { \
......
......@@ -100,9 +100,7 @@ struct trace_entry {
u32 arg4; /* Caller arg4 */
};
/*
* Driver types for driver field in struct trace_entry
*/
/* Driver types for driver field in struct trace_entry */
#define TRACE_SXG 1
#define TRACE_VPCI 2
#define TRACE_SLIC 3
......@@ -129,11 +127,7 @@ struct sxg_trace_buffer {
#define TRACE_NOISY 10 /* Everything in the world */
/**********************************************************************
*
* The macros themselves -
*
*********************************************************************/
/* The macros themselves */
#if ATK_TRACE_ENABLED
#define SXG_TRACE_INIT(buffer, tlevel) \
{ \
......@@ -146,9 +140,7 @@ struct sxg_trace_buffer {
#define SXG_TRACE_INIT(buffer, tlevel)
#endif
/*
* The trace macro. This is active only if ATK_TRACE_ENABLED is set.
*/
/* The trace macro. This is active only if ATK_TRACE_ENABLED is set. */
#if ATK_TRACE_ENABLED
#define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) { \
if ((buffer) && ((buffer)->level >= (tlevel))) { \
......
/*
/*******************************************************************
* Copyright 1997-2007 Alacritech, Inc. All rights reserved
*
* $Id: sxghif.h,v 1.5 2008/07/24 19:18:22 chris Exp $
......@@ -7,132 +7,132 @@
*
* This file contains structures and definitions for the
* Alacritech Sahara host interface
*/
******************************************************************/
/*******************************************************************************
* UCODE Registers
*******************************************************************************/
/* UCODE Registers */
struct sxg_ucode_regs {
// Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0
u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
u32 RsvdReg1; // Code = 1 - TOE -NA
u32 RsvdReg2; // Code = 2 - TOE -NA
u32 RsvdReg3; // Code = 3 - TOE -NA
u32 RsvdReg4; // Code = 4 - TOE -NA
u32 RsvdReg5; // Code = 5 - TOE -NA
u32 CardUp; // Code = 6 - Microcode initialized when 1
u32 RsvdReg7; // Code = 7 - TOE -NA
u32 ConfigStat; // Code = 8 - Configuration data load status
u32 RsvdReg9; // Code = 9 - TOE -NA
u32 CodeNotUsed[6]; // Codes 10-15 not used. ExCode = 0
// This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
u32 Isp; // Code = 0 (extended), ExCode = 1
u32 PadEx1[15]; // Codes 1-15 not used with extended codes
// ExCode 2 = Interrupt Status Register
u32 Isr; // Code = 0 (extended), ExCode = 2
/* Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 */
u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */
u32 RsvdReg1; /* Code = 1 - TOE -NA */
u32 RsvdReg2; /* Code = 2 - TOE -NA */
u32 RsvdReg3; /* Code = 3 - TOE -NA */
u32 RsvdReg4; /* Code = 4 - TOE -NA */
u32 RsvdReg5; /* Code = 5 - TOE -NA */
u32 CardUp; /* Code = 6 - Microcode initialized when 1 */
u32 RsvdReg7; /* Code = 7 - TOE -NA */
u32 ConfigStat; /* Code = 8 - Configuration data load status */
u32 RsvdReg9; /* Code = 9 - TOE -NA */
u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */
/* This brings us to ExCode 1 at address 0x40 = Interrupt status pointer */
u32 Isp; /* Code = 0 (extended), ExCode = 1 */
u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */
/* ExCode 2 = Interrupt Status Register */
u32 Isr; /* Code = 0 (extended), ExCode = 2 */
u32 PadEx2[15];
// ExCode 3 = Event base register. Location of event rings
u32 EventBase; // Code = 0 (extended), ExCode = 3
/* ExCode 3 = Event base register. Location of event rings */
u32 EventBase; /* Code = 0 (extended), ExCode = 3 */
u32 PadEx3[15];
// ExCode 4 = Event ring size
u32 EventSize; // Code = 0 (extended), ExCode = 4
/* ExCode 4 = Event ring size */
u32 EventSize; /* Code = 0 (extended), ExCode = 4 */
u32 PadEx4[15];
// ExCode 5 = TCB Buffers base address
u32 TcbBase; // Code = 0 (extended), ExCode = 5
/* ExCode 5 = TCB Buffers base address */
u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */
u32 PadEx5[15];
// ExCode 6 = TCB Composite Buffers base address
u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
/* ExCode 6 = TCB Composite Buffers base address */
u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */
u32 PadEx6[15];
// ExCode 7 = Transmit ring base address
u32 XmtBase; // Code = 0 (extended), ExCode = 7
/* ExCode 7 = Transmit ring base address */
u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */
u32 PadEx7[15];
// ExCode 8 = Transmit ring size
u32 XmtSize; // Code = 0 (extended), ExCode = 8
/* ExCode 8 = Transmit ring size */
u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */
u32 PadEx8[15];
// ExCode 9 = Receive ring base address
u32 RcvBase; // Code = 0 (extended), ExCode = 9
/* ExCode 9 = Receive ring base address */
u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */
u32 PadEx9[15];
// ExCode 10 = Receive ring size
u32 RcvSize; // Code = 0 (extended), ExCode = 10
/* ExCode 10 = Receive ring size */
u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */
u32 PadEx10[15];
// ExCode 11 = Read EEPROM/Flash Config
u32 Config; // Code = 0 (extended), ExCode = 11
/* ExCode 11 = Read EEPROM/Flash Config */
u32 Config; /* Code = 0 (extended), ExCode = 11 */
u32 PadEx11[15];
// ExCode 12 = Multicast bits 31:0
u32 McastLow; // Code = 0 (extended), ExCode = 12
/* ExCode 12 = Multicast bits 31:0 */
u32 McastLow; /* Code = 0 (extended), ExCode = 12 */
u32 PadEx12[15];
// ExCode 13 = Multicast bits 63:32
u32 McastHigh; // Code = 0 (extended), ExCode = 13
/* ExCode 13 = Multicast bits 63:32 */
u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */
u32 PadEx13[15];
// ExCode 14 = Ping
u32 Ping; // Code = 0 (extended), ExCode = 14
/* ExCode 14 = Ping */
u32 Ping; /* Code = 0 (extended), ExCode = 14 */
u32 PadEx14[15];
// ExCode 15 = Link MTU
u32 LinkMtu; // Code = 0 (extended), ExCode = 15
/* ExCode 15 = Link MTU */
u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */
u32 PadEx15[15];
// ExCode 16 = Download synchronization
u32 LoadSync; // Code = 0 (extended), ExCode = 16
/* ExCode 16 = Download synchronization */
u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */
u32 PadEx16[15];
// ExCode 17 = Upper DRAM address bits on 32-bit systems
u32 Upper; // Code = 0 (extended), ExCode = 17
/* ExCode 17 = Upper DRAM address bits on 32-bit systems */
u32 Upper; /* Code = 0 (extended), ExCode = 17 */
u32 PadEx17[15];
// ExCode 18 = Slowpath Send Index Address
u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
/* ExCode 18 = Slowpath Send Index Address */
u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */
u32 PadEx18[15];
// ExCode 19 = Get ucode statistics
u32 GetUcodeStats; // Code = 0 (extended), ExCode = 19
/* ExCode 19 = Get ucode statistics */
u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */
u32 PadEx19[15];
// ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation
u32 Aggregation; // Code = 0 (extended), ExCode = 20
/* ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation */
u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */
u32 PadEx20[15];
// ExCode 21 = Receive MDL push timer
u32 PushTicks; // Code = 0 (extended), ExCode = 21
/* ExCode 21 = Receive MDL push timer */
u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */
u32 PadEx21[15];
// ExCode 22 = ACK Frequency
u32 AckFrequency; // Code = 0 (extended), ExCode = 22
/* ExCode 22 = ACK Frequency */
u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */
u32 PadEx22[15];
// ExCode 23 = TOE NA
/* ExCode 23 = TOE NA */
u32 RsvdReg23;
u32 PadEx23[15];
// ExCode 24 = TOE NA
/* ExCode 24 = TOE NA */
u32 RsvdReg24;
u32 PadEx24[15];
// ExCode 25 = TOE NA
u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
/* ExCode 25 = TOE NA */
u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */
u32 PadEx25[15];
// ExCode 26 = Receive checksum requirements
u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
/* ExCode 26 = Receive checksum requirements */
u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */
u32 PadEx26[15];
// ExCode 27 = RSS Requirements
u32 Rss; // Code = 0 (extended), ExCode = 27
/* ExCode 27 = RSS Requirements */
u32 Rss; /* Code = 0 (extended), ExCode = 27 */
u32 PadEx27[15];
// ExCode 28 = RSS Table
u32 RssTable; // Code = 0 (extended), ExCode = 28
/* ExCode 28 = RSS Table */
u32 RssTable; /* Code = 0 (extended), ExCode = 28 */
u32 PadEx28[15];
// ExCode 29 = Event ring release entries
u32 EventRelease; // Code = 0 (extended), ExCode = 29
/* ExCode 29 = Event ring release entries */
u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */
u32 PadEx29[15];
// ExCode 30 = Number of receive bufferlist commands on ring 0
u32 RcvCmd; // Code = 0 (extended), ExCode = 30
/* ExCode 30 = Number of receive bufferlist commands on ring 0 */
u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */
u32 PadEx30[15];
// ExCode 31 = slowpath transmit command - Data[31:0] = 1
u32 XmtCmd; // Code = 0 (extended), ExCode = 31
/* ExCode 31 = slowpath transmit command - Data[31:0] = 1 */
u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */
u32 PadEx31[15];
// ExCode 32 = Dump command
u32 DumpCmd; // Code = 0 (extended), ExCode = 32
/* ExCode 32 = Dump command */
u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */
u32 PadEx32[15];
// ExCode 33 = Debug command
u32 DebugCmd; // Code = 0 (extended), ExCode = 33
/* ExCode 33 = Debug command */
u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */
u32 PadEx33[15];
// There are 128 possible extended commands - each of account for 16
// words (including the non-relevent base command codes 1-15).
// Pad for the remainder of these here to bring us to the next CPU
// base. As extended codes are added, reduce the first array value in
// the following field
u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
/*
 * There are 128 possible extended commands - each accounts for 16
 * words (including the non-relevant base command codes 1-15).
* Pad for the remainder of these here to bring us to the next CPU
* base. As extended codes are added, reduce the first array value in
* the following field
*/
u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33) */
};
// Interrupt control register (0) values
/* Interrupt control register (0) values */
#define SXG_ICR_DISABLE 0x00000000
#define SXG_ICR_ENABLE 0x00000001
#define SXG_ICR_MASK 0x00000002
......@@ -142,36 +142,39 @@ struct sxg_ucode_regs {
((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \
SXG_ICR_MSGID_MASK) | (_Data))
#define SXG_MIN_AGG_DEFAULT 0x0010 // Minimum aggregation default
#define SXG_MAX_AGG_DEFAULT 0x0040 // Maximum aggregation default
#define SXG_MAX_AGG_SHIFT 16 // Maximum in top 16 bits of register
#define SXG_AGG_XMT_DISABLE 0x80000000 // Disable interrupt aggregation on xmt
#define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */
#define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */
#define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */
#define SXG_AGG_XMT_DISABLE 0x80000000 /* Disable interrupt aggregation on xmt */
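/*
 * Hedged example only: assuming the aggregation register takes the minimum
 * count in its low half and the maximum in the top 16 bits (per
 * SXG_MAX_AGG_SHIFT), the default value would be composed like this.
 */
static inline u32 sxg_default_aggregation_sketch(void)
{
	return (SXG_MAX_AGG_DEFAULT << SXG_MAX_AGG_SHIFT) | SXG_MIN_AGG_DEFAULT;
}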
// The Microcode supports up to 8 RSS queues
/* The Microcode supports up to 8 RSS queues */
#define SXG_MAX_RSS 8
#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
#define SXG_MAX_RSS_TABLE_SIZE 256 /* 256-byte max */
#define SXG_RSS_TCP6 0x00000001 /* RSS TCP over IPv6 */
#define SXG_RSS_TCP4 0x00000002 /* RSS TCP over IPv4 */
#define SXG_RSS_LEGACY			0x00000004	/* Line-based interrupts */
#define SXG_RSS_TABLE_SIZE 0x0000FF00 /* Table size mask */
#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6
#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4
#define SXG_RSS_LEGACY 0x00000004 // Line-base interrupts
#define SXG_RSS_TABLE_SIZE 0x0000FF00 // Table size mask
#define SXG_RSS_TABLE_SHIFT 8
#define SXG_RSS_BASE_CPU 0x00FF0000 // Base CPU (not used)
#define SXG_RSS_BASE_CPU 0x00FF0000 /* Base CPU (not used) */
#define SXG_RSS_BASE_SHIFT 16
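/*
 * Sketch of composing the Rss register value from the fields above.  Which
 * protocol flags a real configuration sets is an assumption; only the
 * mask/shift usage comes from the definitions.
 */
static inline u32 sxg_rss_value_sketch(u32 table_size)
{
	u32 value = SXG_RSS_TCP4 | SXG_RSS_TCP6;

	value |= (table_size << SXG_RSS_TABLE_SHIFT) & SXG_RSS_TABLE_SIZE;
	return value;
}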
#define SXG_RCV_IP_CSUM_ENABLED 0x00000001 // ExCode 26 (ReceiveChecksum)
#define SXG_RCV_TCP_CSUM_ENABLED 0x00000002 // ExCode 26 (ReceiveChecksum)
#define SXG_RCV_IP_CSUM_ENABLED 0x00000001 /* ExCode 26 (ReceiveChecksum) */
#define SXG_RCV_TCP_CSUM_ENABLED 0x00000002 /* ExCode 26 (ReceiveChecksum) */
#define SXG_XMT_CPUID_SHIFT 16
// Status returned by ucode in the ConfigStat reg (see above) when attempted
// to load configuration data from the EEPROM/Flash.
#define SXG_CFG_TIMEOUT 1 // init value - timeout if unchanged
#define SXG_CFG_LOAD_EEPROM 2 // config data loaded from EEPROM
#define SXG_CFG_LOAD_FLASH 3 // config data loaded from flash
#define SXG_CFG_LOAD_INVALID 4 // no valid config data found
#define SXG_CFG_LOAD_ERROR 5 // hardware error
/*
 * Status returned by the ucode in the ConfigStat reg (see above) when it
 * attempts to load configuration data from the EEPROM/Flash.
*/
#define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */
#define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */
#define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */
#define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */
#define SXG_CFG_LOAD_ERROR 5 /* hardware error */
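/*
 * Minimal polling sketch, assuming the driver spins on UcodeRegs->ConfigStat
 * until it leaves the SXG_CFG_TIMEOUT init value.  The retry count, delay and
 * the use of readl() instead of the driver's own register accessors are all
 * illustrative assumptions.
 */
static bool sxg_config_loaded_sketch(struct adapter_t *adapter)
{
	u32 status = SXG_CFG_TIMEOUT;
	int i;

	for (i = 0; i < 1000; i++) {
		status = readl(&adapter->UcodeRegs->ConfigStat);
		if (status != SXG_CFG_TIMEOUT)
			break;
		udelay(100);
	}
	return (status == SXG_CFG_LOAD_EEPROM) || (status == SXG_CFG_LOAD_FLASH);
}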
#define SXG_CHECK_FOR_HANG_TIME 5
......@@ -220,29 +223,26 @@ struct sxg_tcb_regs {
* ||---|-CpuId of crash
* |----/
***************************************************************************/
#define SXG_ISR_ERR 0x80000000 // Error
#define SXG_ISR_EVENT 0x40000000 // Event ring event
#define SXG_ISR_NONE1 0x20000000 // Not used
#define SXG_ISR_UPC 0x10000000 // Dump/debug command complete
#define SXG_ISR_LINK 0x08000000 // Link event
#define SXG_ISR_PDQF 0x04000000 // Processed data queue full
#define SXG_ISR_RMISS 0x02000000 // Drop - no host buf
#define SXG_ISR_BREAK 0x01000000 // Breakpoint hit
#define SXG_ISR_PING 0x00800000 // Heartbeat response
#define SXG_ISR_DEAD 0x00400000 // Card crash
#define SXG_ISR_ERFULL 0x00200000 // Event ring full
#define SXG_ISR_XDROP 0x00100000 // XMT Drop - no DRAM bufs or XMT err
#define SXG_ISR_SPSEND 0x00080000 // Slow send complete
#define SXG_ISR_CPU 0x00070000 // Dead CPU mask
#define SXG_ISR_CPU_SHIFT 16 // Dead CPU shift
#define SXG_ISR_CRASH 0x0000FFFF // Crash address mask
#define SXG_ISR_ERR 0x80000000 /* Error */
#define SXG_ISR_EVENT 0x40000000 /* Event ring event */
#define SXG_ISR_NONE1 0x20000000 /* Not used */
#define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete */
#define SXG_ISR_LINK 0x08000000 /* Link event */
#define SXG_ISR_PDQF 0x04000000 /* Processed data queue full */
#define SXG_ISR_RMISS 0x02000000 /* Drop - no host buf */
#define SXG_ISR_BREAK 0x01000000 /* Breakpoint hit */
#define SXG_ISR_PING 0x00800000 /* Heartbeat response */
#define SXG_ISR_DEAD 0x00400000 /* Card crash */
#define SXG_ISR_ERFULL 0x00200000 /* Event ring full */
#define SXG_ISR_XDROP 0x00100000 /* XMT Drop - no DRAM bufs or XMT err */
#define SXG_ISR_SPSEND 0x00080000 /* Slow send complete */
#define SXG_ISR_CPU 0x00070000 /* Dead CPU mask */
#define SXG_ISR_CPU_SHIFT 16 /* Dead CPU shift */
#define SXG_ISR_CRASH 0x0000FFFF /* Crash address mask */
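/*
 * Illustrative decode of a card-crash interrupt using the masks above; the
 * DBG_ERROR() report is just one possible use of the extracted fields.
 */
static void sxg_report_crash_sketch(u32 isr)
{
	if (isr & SXG_ISR_DEAD) {
		u32 cpu = (isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT;
		u32 address = isr & SXG_ISR_CRASH;

		DBG_ERROR("sxg: ucode crash - cpu %d, address 0x%x\n",
			  cpu, address);
	}
}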
/***************************************************************************
*
* Event Ring entry
*
***************************************************************************/
/*
* 31 15 0
* .___________________.___________________.
* |<------------ Pad 0 ------------>|
......@@ -284,80 +284,80 @@ struct sxg_tcb_regs {
* ||------- ISTCPIP
* |-------- SCERR
*
*/
************************************************************************/
#pragma pack(push, 1)
struct sxg_event {
u32 Pad[1]; // not used
u32 SndUna; // SndUna value
u32 Resid; // receive MDL resid
u32 Pad[1]; /* not used */
u32 SndUna; /* SndUna value */
u32 Resid; /* receive MDL resid */
union {
void *HostHandle; // Receive host handle
u32 Rsvd1; // TOE NA
void * HostHandle; /* Receive host handle */
u32 Rsvd1; /* TOE NA */
struct {
u32 NotUsed;
u32 Rsvd2; // TOE NA
u32 Rsvd2; /* TOE NA */
} Flush;
};
u32 Toeplitz; // RSS Toeplitz hash
u32 Toeplitz; /* RSS Toeplitz hash */
union {
ushort Rsvd3; // TOE NA
ushort HdrOffset; // Slowpath
ushort Rsvd3; /* TOE NA */
ushort HdrOffset; /* Slowpath */
};
ushort Length; //
unsigned char Rsvd4; // TOE NA
unsigned char Code; // Event code
unsigned char CommandIndex; // New ring index
unsigned char Status; // Event status
ushort Length;
unsigned char Rsvd4; /* TOE NA */
unsigned char Code; /* Event code */
unsigned char CommandIndex; /* New ring index */
unsigned char Status; /* Event status */
};
#pragma pack(pop)
// Event code definitions
#define EVENT_CODE_BUFFERS 0x01 // Receive buffer list command (ring 0)
#define EVENT_CODE_SLOWRCV 0x02 // Slowpath receive
#define EVENT_CODE_UNUSED 0x04 // Was slowpath commands complete
// Status values
#define EVENT_STATUS_VALID 0x80 // Entry valid
// Slowpath status
#define EVENT_STATUS_ERROR 0x40 // Completed with error. Index in next byte
#define EVENT_STATUS_TCPIP4 0x20 // TCPIPv4 frame
#define EVENT_STATUS_TCPBAD 0x10 // Bad TCP checksum
#define EVENT_STATUS_IPBAD 0x08 // Bad IP checksum
#define EVENT_STATUS_RCVERR 0x04 // Slowpath receive error
#define EVENT_STATUS_IPONLY 0x02 // IP frame
#define EVENT_STATUS_TCPIP6 0x01 // TCPIPv6 frame
#define EVENT_STATUS_TCPIP 0x21 // Combination of v4 and v6
// Event ring
// Size must be power of 2, between 128 and 16k
#define EVENT_RING_SIZE 4096 // ??
#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
#define EVENT_BATCH_LIMIT 256 // Stop processing events after 4096 (256 * 16)
/* Event code definitions */
#define EVENT_CODE_BUFFERS 0x01 /* Receive buffer list command (ring 0) */
#define EVENT_CODE_SLOWRCV 0x02 /* Slowpath receive */
#define EVENT_CODE_UNUSED 0x04 /* Was slowpath commands complete */
/* Status values */
#define EVENT_STATUS_VALID 0x80 /* Entry valid */
/* Slowpath status */
#define EVENT_STATUS_ERROR 0x40 /* Completed with error. Index in next byte */
#define EVENT_STATUS_TCPIP4 0x20 /* TCPIPv4 frame */
#define EVENT_STATUS_TCPBAD 0x10 /* Bad TCP checksum */
#define EVENT_STATUS_IPBAD 0x08 /* Bad IP checksum */
#define EVENT_STATUS_RCVERR 0x04 /* Slowpath receive error */
#define EVENT_STATUS_IPONLY 0x02 /* IP frame */
#define EVENT_STATUS_TCPIP6 0x01 /* TCPIPv6 frame */
#define EVENT_STATUS_TCPIP 0x21 /* Combination of v4 and v6 */
/*
* Event ring
* Size must be power of 2, between 128 and 16k
*/
#define EVENT_RING_SIZE 4096
#define EVENT_RING_BATCH 16 /* Hand entries back 16 at a time. */
#define EVENT_BATCH_LIMIT 256 /* Stop processing events after 4096 (256 * 16) */
struct sxg_event_ring {
struct sxg_event Ring[EVENT_RING_SIZE];
};
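/*
 * Sketch of the batching described above.  sxg_handle_event() is a
 * hypothetical per-entry handler; clearing Status to mark an entry consumed
 * and indexing UcodeRegs[] by RSS queue are assumptions, while the
 * batch/limit arithmetic follows the definitions above.
 */
static void sxg_drain_events_sketch(struct adapter_t *adapter, u32 queue)
{
	struct sxg_event_ring *ring = &adapter->EventRings[queue];
	struct sxg_event *event = &ring->Ring[adapter->NextEvent[queue]];
	u32 batches = 0, handled = 0;

	while ((event->Status & EVENT_STATUS_VALID) &&
	       (batches < EVENT_BATCH_LIMIT)) {
		sxg_handle_event(adapter, event);	/* hypothetical */
		event->Status = 0;
		adapter->NextEvent[queue] =
			(adapter->NextEvent[queue] + 1) % EVENT_RING_SIZE;
		if (++handled == EVENT_RING_BATCH) {
			/* hand a batch of entries back to the ucode */
			writel(EVENT_RING_BATCH,
			       &adapter->UcodeRegs[queue].EventRelease);
			handled = 0;
			batches++;
		}
		event = &ring->Ring[adapter->NextEvent[queue]];
	}
}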
/***************************************************************************
*
* TCB Buffers
*
***************************************************************************/
// Maximum number of TCBS supported by hardware/microcode
/* TCB Buffers */
/* Maximum number of TCBS supported by hardware/microcode */
#define SXG_MAX_TCB 4096
// Minimum TCBs before we fail initialization
/* Minimum TCBs before we fail initialization */
#define SXG_MIN_TCB 512
// TCB Hash
// The bucket is determined by bits 11:4 of the toeplitz if we support 4k
// offloaded connections, 10:4 if we support 2k and so on.
/*
* TCB Hash
* The bucket is determined by bits 11:4 of the toeplitz if we support 4k
* offloaded connections, 10:4 if we support 2k and so on.
*/
#define SXG_TCB_BUCKET_SHIFT 4
#define SXG_TCB_PER_BUCKET 16
#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID
#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket
#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
#define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */
#define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */
#define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */
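/*
 * Worked example of the hash math above for the 4k-TCB case: bits 11:4 of
 * the Toeplitz hash pick one of 256 buckets, bits 3:0 the element within it.
 */
static inline u32 sxg_tcb_bucket_sketch(u32 toeplitz)
{
	return (toeplitz & SXG_TCB_BUCKET_MASK) >> SXG_TCB_BUCKET_SHIFT;
}

static inline u32 sxg_tcb_element_sketch(u32 toeplitz)
{
	return toeplitz & SXG_TCB_ELEMENT_MASK;
}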
#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct
#define SXG_TCB_BUFFER_SIZE 512 /* ASSERT format is correct */
#define SXG_TCB_RCVQ_SIZE 736
......@@ -383,12 +383,14 @@ struct sxg_event_ring {
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
#if DBG
// Horrible kludge to distinguish dumb-nic, slowpath, and
// fastpath traffic. Decrement the HopLimit by one
// for slowpath, two for fastpath. This assumes the limit is measurably
// greater than two, which I think is reasonable.
// Obviously this is DBG only. Maybe remove later, or #if 0 so we
// can set it when needed
/*
* Horrible kludge to distinguish dumb-nic, slowpath, and
* fastpath traffic. Decrement the HopLimit by one
* for slowpath, two for fastpath. This assumes the limit is measurably
* greater than two, which I think is reasonable.
* Obviously this is DBG only. Maybe remove later, or #if 0 so we
* can set it when needed
*/
#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \
PIPV6_HDR _Ip6FrameHdr; \
if((_TcpObject)->IPv6) { \
......@@ -401,24 +403,22 @@ struct sxg_event_ring {
} \
}
#else
// Do nothing with free build
/* Do nothing with free build */
#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath)
#endif
/***************************************************************************
* Receive and transmit rings
***************************************************************************/
/* Receive and transmit rings */
#define SXG_MAX_RING_SIZE 256
#define SXG_XMT_RING_SIZE 128 // Start with 128
#define SXG_RCV_RING_SIZE 128 // Start with 128
#define SXG_XMT_RING_SIZE 128 /* Start with 128 */
#define SXG_RCV_RING_SIZE 128 /* Start with 128 */
#define SXG_MAX_ENTRIES 4096
// Structure and macros to manage a ring
/* Structure and macros to manage a ring */
struct sxg_ring_info {
unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
unsigned char Tail; // Where we pull off completed entries
ushort Size; // Ring size - Must be multiple of 2
void *Context[SXG_MAX_RING_SIZE]; // Shadow ring
unsigned char Head; /* Where we add entries - Note unsigned char:RING_SIZE */
unsigned char Tail; /* Where we pull off completed entries */
ushort Size; /* Ring size - Must be multiple of 2 */
void * Context[SXG_MAX_RING_SIZE]; /* Shadow ring */
};
#define SXG_INITIALIZE_RING(_ring, _size) { \
......@@ -437,9 +437,11 @@ struct sxg_ring_info {
ASSERT((_ring)->Tail != (_ring)->Head); \
SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \
}
// Set cmd to the next available ring entry, set the shadow context
// entry and advance the ring.
// The appropriate lock must be held when calling this macro
/*
* Set cmd to the next available ring entry, set the shadow context
* entry and advance the ring.
* The appropriate lock must be held when calling this macro
*/
#define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \
if(SXG_RING_FULL(_ringinfo)) { \
(_cmd) = NULL; \
......@@ -450,17 +452,21 @@ struct sxg_ring_info {
} \
}
// Abort the previously allocated command by retreating the head.
// NOTE - The appopriate lock MUST NOT BE DROPPED between the SXG_GET_CMD
// and SXG_ABORT_CMD calls.
/*
* Abort the previously allocated command by retreating the head.
 * NOTE - The appropriate lock MUST NOT BE DROPPED between the SXG_GET_CMD
* and SXG_ABORT_CMD calls.
*/
#define SXG_ABORT_CMD(_ringinfo) { \
ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \
SXG_RING_RETREAT_HEAD(_ringinfo); \
(_ringinfo)->Context[(_ringinfo)->Head] = NULL; \
}
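/*
 * Usage sketch for SXG_GET_CMD/SXG_ABORT_CMD on transmit ring zero, holding
 * XmtZeroLock across both calls as the note above requires.  The function
 * name and the sxg_fill_cmd() helper are hypothetical; the macro arguments
 * follow the adapter fields defined earlier in this file.
 */
static int sxg_queue_xmt_cmd_sketch(struct adapter_t *adapter, void *context)
{
	struct sxg_cmd *cmd;

	spin_lock(&adapter->XmtZeroLock);
	SXG_GET_CMD(adapter->XmtRings, &adapter->XmtRingZeroInfo,
		    cmd, context);
	if (cmd == NULL) {
		/* Ring full */
		spin_unlock(&adapter->XmtZeroLock);
		return -ENOSPC;
	}
	if (!sxg_fill_cmd(cmd, context)) {	/* hypothetical */
		/* Back out the slot we just took; the lock is still held */
		SXG_ABORT_CMD(&adapter->XmtRingZeroInfo);
		spin_unlock(&adapter->XmtZeroLock);
		return -EINVAL;
	}
	spin_unlock(&adapter->XmtZeroLock);
	return 0;
}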
// For the given ring, return a pointer to the tail cmd and context,
// clear the context and advance the tail
/*
* For the given ring, return a pointer to the tail cmd and context,
* clear the context and advance the tail
*/
#define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \
(_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \
(_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \
......@@ -468,12 +474,9 @@ struct sxg_ring_info {
SXG_RING_ADVANCE_TAIL(_ringinfo); \
}
/***************************************************************************
*
/***************************************************************
* Host Command Buffer - commands to INIC via the Cmd Rings
*
***************************************************************************/
/*
* 31 15 0
* .___________________.___________________.
* |<-------------- Sgl Low -------------->|
......@@ -493,42 +496,42 @@ struct sxg_ring_info {
* |_________|_________|_________|_________|24 0x18
* |<----- LCnt ------>|<----- Flags ----->|
* |_________|_________|_________|_________|28 0x1c
*/
****************************************************************/
#pragma pack(push, 1)
struct sxg_cmd {
dma_addr_t Sgl; // Physical address of SGL
dma_addr_t Sgl; /* Physical address of SGL */
union {
struct {
dma64_addr_t FirstSgeAddress; // Address of first SGE
u32 FirstSgeLength; // Length of first SGE
dma64_addr_t FirstSgeAddress; /* Address of first SGE */
u32 FirstSgeLength; /* Length of first SGE */
union {
u32 Rsvd1; // TOE NA
u32 SgeOffset; // Slowpath - 2nd SGE offset
u32 Resid; // MDL completion - clobbers update
u32 Rsvd1; /* TOE NA */
u32 SgeOffset; /* Slowpath - 2nd SGE offset */
u32 Resid; /* MDL completion - clobbers update */
};
union {
u32 TotalLength; // Total transfer length
u32 Mss; // LSO MSS
u32 TotalLength; /* Total transfer length */
u32 Mss; /* LSO MSS */
};
} Buffer;
};
union {
struct {
unsigned char Flags:4; // slowpath flags
unsigned char IpHl:4; // Ip header length (>>2)
unsigned char MacLen; // Mac header len
unsigned char Flags:4; /* slowpath flags */
unsigned char IpHl:4; /* Ip header length (>>2) */
unsigned char MacLen; /* Mac header len */
} CsumFlags;
struct {
ushort Flags:4; // slowpath flags
ushort TcpHdrOff:7; // TCP
ushort MacLen:5; // Mac header len
ushort Flags:4; /* slowpath flags */
ushort TcpHdrOff:7; /* TCP */
ushort MacLen:5; /* Mac header len */
} LsoFlags;
ushort Flags; // flags
ushort Flags; /* flags */
};
union {
ushort SgEntries; // SG entry count including first sge
ushort SgEntries; /* SG entry count including first sge */
struct {
unsigned char Status; // Copied from event status
unsigned char Status; /* Copied from event status */
unsigned char NotUsed;
} Status;
};
......@@ -542,7 +545,7 @@ struct vlan_hdr {
};
#pragma pack(pop)
/*
/********************************************************************
* Slowpath Flags:
*
*
......@@ -572,11 +575,11 @@ struct vlan_hdr {
* | LCnt |MAC hlen |Hlen|Flgs|
* |___________________|____|____|____|____|
*
*/
// Slowpath CMD flags
#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
#define SXG_SLOWCMD_LSO 0x04 // Large segment send
*****************************************************************/
/* Slowpath CMD flags */
#define SXG_SLOWCMD_CSUM_IP 0x01 /* Checksum IP */
#define SXG_SLOWCMD_CSUM_TCP 0x02 /* Checksum TCP */
#define SXG_SLOWCMD_LSO 0x04 /* Large segment send */
struct sxg_xmt_ring {
struct sxg_cmd Descriptors[SXG_XMT_RING_SIZE];
......@@ -586,22 +589,22 @@ struct sxg_rcv_ring {
struct sxg_cmd Descriptors[SXG_RCV_RING_SIZE];
};
/***************************************************************************
/*
* Share memory buffer types - Used to identify asynchronous
* shared memory allocation
***************************************************************************/
*/
enum sxg_buffer_type {
SXG_BUFFER_TYPE_RCV, // Receive buffer
SXG_BUFFER_TYPE_SGL // SGL buffer
SXG_BUFFER_TYPE_RCV, /* Receive buffer */
SXG_BUFFER_TYPE_SGL /* SGL buffer */
};
// State for SXG buffers
/* State for SXG buffers */
#define SXG_BUFFER_FREE 0x01
#define SXG_BUFFER_BUSY 0x02
#define SXG_BUFFER_ONCARD 0x04
#define SXG_BUFFER_UPSTREAM 0x08
/***************************************************************************
/*
* Receive data buffers
*
* Receive data buffers are given to the Sahara card 128 at a time.
......@@ -677,67 +680,71 @@ enum sxg_buffer_type {
* + struct sxg_rcv_block_hdr = ~32
* => Total = ~1282k/block
*
***************************************************************************/
#define SXG_RCV_DATA_BUFFERS 8192 // Amount to give to the card
#define SXG_INITIAL_RCV_DATA_BUFFERS 16384 // Initial pool of buffers
#define SXG_MIN_RCV_DATA_BUFFERS 4096 // Minimum amount and when to get more
#define SXG_MAX_RCV_BLOCKS 256 // = 32k receive buffers
*/
#define SXG_RCV_DATA_BUFFERS 8192 /* Amount to give to the card */
#define SXG_INITIAL_RCV_DATA_BUFFERS 16384 /* Initial pool of buffers */
#define SXG_MIN_RCV_DATA_BUFFERS 4096 /* Minimum amount and when to get more */
#define SXG_MAX_RCV_BLOCKS 256 /* = 32k receive buffers */
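/*
 * Worked check of the figures above, using SXG_RCV_DESCRIPTORS_PER_BLOCK
 * (defined further down as 128): 256 blocks * 128 descriptors = 32768, which
 * is the "32k receive buffers" noted next to SXG_MAX_RCV_BLOCKS, and the
 * 16384-buffer initial pool therefore occupies 128 blocks.
 */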
// Receive buffer header
/* Receive buffer header */
struct sxg_rcv_data_buffer_hdr {
dma_addr_t PhysicalAddress; // Buffer physical address
// Note - DO NOT USE the VirtualAddress field to locate data.
// Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
void *VirtualAddress; // Start of buffer
u32 Size; // Buffer size
struct sxg_rcv_data_buffer_hdr *Next; // Fastpath data buffer queue
struct list_entry FreeList; // Free queue of buffers
unsigned char State; // See SXG_BUFFER state above
unsigned char Status; // Event status (to log PUSH)
struct sk_buff *skb; // Double mapped (nbl and pkt)
dma_addr_t PhysicalAddress; /* Buffer physical address */
/*
* Note - DO NOT USE the VirtualAddress field to locate data.
* Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
*/
void *VirtualAddress; /* Start of buffer */
u32 Size; /* Buffer size */
struct sxg_rcv_data_buffer_hdr *Next; /* Fastpath data buffer queue */
struct list_entry FreeList; /* Free queue of buffers */
unsigned char State; /* See SXG_BUFFER state above */
unsigned char Status; /* Event status (to log PUSH) */
struct sk_buff * skb; /* Double mapped (nbl and pkt) */
};
// SxgSlowReceive uses the PACKET (skb) contained
// in the struct sxg_rcv_data_buffer_hdr when indicating dumb-nic data
/*
* SxgSlowReceive uses the PACKET (skb) contained
* in the struct sxg_rcv_data_buffer_hdr when indicating dumb-nic data
*/
#define SxgDumbRcvPacket skb
#define SXG_RCV_DATA_HDR_SIZE 256 // Space for struct sxg_rcv_data_buffer_hdr
#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR
#define SXG_RCV_DATA_HDR_SIZE 256 /* Space for struct sxg_rcv_data_buffer_hdr */
#define SXG_RCV_DATA_BUFFER_SIZE 2048 /* Non jumbo = 2k including HDR */
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 /* jumbo = 10k including HDR */
// Receive data descriptor
/* Receive data descriptor */
struct sxg_rcv_data_descriptor {
union {
struct sk_buff *VirtualAddress; // Host handle
u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
struct sk_buff *VirtualAddress; /* Host handle */
u64 ForceTo8Bytes; /* Force x86 to 8-byte boundary */
};
dma_addr_t PhysicalAddress;
};
// Receive descriptor block
/* Receive descriptor block */
#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check
#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 /* For sanity check */
struct sxg_rcv_descriptor_block {
struct sxg_rcv_data_descriptor Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
};
// Receive descriptor block header
/* Receive descriptor block header */
struct sxg_rcv_descriptor_block_hdr {
void *VirtualAddress; // Start of 2k buffer
dma_addr_t PhysicalAddress; // ..and it's physical address
struct list_entry FreeList; // Free queue of descriptor blocks
unsigned char State; // See SXG_BUFFER state above
void *VirtualAddress; /* start of 2k buffer */
	dma_addr_t PhysicalAddress;	/* ..and its physical address */
struct list_entry FreeList; /* free queue of descriptor blocks */
unsigned char State; /* see sxg_buffer state above */
};
// Receive block header
/* Receive block header */
struct sxg_rcv_block_hdr {
void *VirtualAddress; // Start of virtual memory
dma_addr_t PhysicalAddress; // ..and it's physical address
struct list_entry AllList; // Queue of all SXG_RCV_BLOCKS
void *VirtualAddress; /* Start of virtual memory */
	dma_addr_t PhysicalAddress;	/* ..and its physical address */
struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS */
};
// Macros to determine data structure offsets into receive block
/* Macros to determine data structure offsets into receive block */
#define SXG_RCV_BLOCK_SIZE(_Buffersize) \
(((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
(sizeof(struct sxg_rcv_descriptor_block)) + \
......@@ -757,61 +764,63 @@ struct sxg_rcv_block_hdr {
(sizeof(struct sxg_rcv_descriptor_block)) + \
(sizeof(struct sxg_rcv_descriptor_block_hdr)))
/***************************************************************************
* Scatter gather list buffer
***************************************************************************/
#define SXG_INITIAL_SGL_BUFFERS 8192 // Initial pool of SGL buffers
#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more
#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort)
// SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL buffers.
// These buffers are allocated out of shared memory and used to
// contain a physical scatter gather list structure that is shared
// with the card.
//
// We split our SGL buffers into multiple pools based on size. The motivation
// is that some applications perform very large I/Os (1MB for example), so
// we need to be able to allocate an SGL to accommodate such a request.
// But such an SGL would require 256 24-byte SG entries - ~6k.
// Given that the vast majority of I/Os are much smaller than 1M, allocating
// a single pool of SGL buffers would be a horribly inefficient use of
// memory.
//
// The following structure includes two fields relating to its size.
// The NBSize field specifies the largest NET_BUFFER that can be handled
// by the particular pool. The SGEntries field defines the size, in
// entries, of the SGL for that pool. The SGEntries is determined by
// dividing the NBSize by the expected page size (4k), and then padding
// it by some appropriate amount as insurance (20% or so..??).
/* Scatter gather list buffer */
#define SXG_INITIAL_SGL_BUFFERS 8192 /* Initial pool of SGL buffers */
#define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more */
#define SXG_MAX_SGL_BUFFERS 16384 /* Maximum to allocate (note ADAPT:ushort) */
/*
* SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL buffers.
* These buffers are allocated out of shared memory and used to
* contain a physical scatter gather list structure that is shared
* with the card.
*
* We split our SGL buffers into multiple pools based on size. The motivation
* is that some applications perform very large I/Os (1MB for example), so
* we need to be able to allocate an SGL to accommodate such a request.
* But such an SGL would require 256 24-byte SG entries - ~6k.
* Given that the vast majority of I/Os are much smaller than 1M, allocating
* a single pool of SGL buffers would be a horribly inefficient use of
* memory.
*
* The following structure includes two fields relating to its size.
* The NBSize field specifies the largest NET_BUFFER that can be handled
* by the particular pool. The SGEntries field defines the size, in
* entries, of the SGL for that pool. The SGEntries is determined by
* dividing the NBSize by the expected page size (4k), and then padding
* it by some appropriate amount as insurance (20% or so..??).
*/
struct sxg_sgl_pool_properties {
u32 NBSize; // Largest NET_BUFFER size for this pool
ushort SGEntries; // Number of entries in SGL
ushort InitialBuffers; // Number to allocate at initializationtime
ushort MinBuffers; // When to get more
ushort MaxBuffers; // When to stop
ushort PerCpuThreshold;// See sxgh.h:SXG_RESOURCES
u32 NBSize; /* Largest NET_BUFFER size for this pool */
ushort SGEntries; /* Number of entries in SGL */
	ushort InitialBuffers;	/* Number to allocate at initialization time */
ushort MinBuffers; /* When to get more */
ushort MaxBuffers; /* When to stop */
ushort PerCpuThreshold;/* See sxgh.h:SXG_RESOURCES */
};
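/*
 * Worked example of the sizing rule described above, under the stated
 * assumptions (4k pages, roughly 20% insurance): for the 100k pool,
 * 102400 / 4096 = 25 entries, and 25 + 25/5 = 30, matching the "30 entries
 * per" figure quoted in the pool description below.
 */
static inline ushort sxg_sgl_entries_sketch(u32 nb_size)
{
	u32 entries = nb_size / 4096;		/* expected page size */

	return (ushort)(entries + entries / 5);	/* ~20% insurance */
}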
// At the moment I'm going to statically initialize 4 pools:
// 100k buffer pool: The vast majority of the expected buffers are expected to
// be less than or equal to 100k. At 30 entries per and
// 8k initial buffers amounts to ~4MB of memory
// NOTE - This used to be 64K with 20 entries, but during
// WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their
// best to send absurd NBL's with ridiculous SGLs, we
// have received 400byte sends contained in SGL's that
// have 28 entries
// 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial buffers
// with 300 entries each => ~2MB of memory
// 5M buffer pool: Not expected often, if at all. 32 initial buffers
// at 1500 entries each => ~1MB of memory
// 10M buffer pool: Not expected at all, except under pathelogical conditions.
// Allocate one at initialization time.
// Note - 10M is the current limit of what we can
// realistically support due to the sahara SGL
// bug described in the SAHARA SGL WORKAROUND below
//
// We will likely adjust the number of pools and/or pool properties over time..
/*
* At the moment I'm going to statically initialize 4 pools:
* 100k buffer pool: The vast majority of the expected buffers are expected to
* be less than or equal to 100k. At 30 entries per and
* 8k initial buffers amounts to ~4MB of memory
* NOTE - This used to be 64K with 20 entries, but during
* WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their
* best to send absurd NBL's with ridiculous SGLs, we
 * have received 400-byte sends contained in SGLs that
* have 28 entries
* 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial buffers
* with 300 entries each => ~2MB of memory
* 5M buffer pool: Not expected often, if at all. 32 initial buffers
* at 1500 entries each => ~1MB of memory
 * 10M buffer pool: Not expected at all, except under pathological conditions.
* Allocate one at initialization time.
* Note - 10M is the current limit of what we can
* realistically support due to the sahara SGL
* bug described in the SAHARA SGL WORKAROUND below
*
* We will likely adjust the number of pools and/or pool properties over time..
*/
#define SXG_NUM_SGL_POOLS 4
#define INITIALIZE_SGL_POOL_PROPERTIES \
struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] = \
......@@ -827,88 +836,97 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[];
#define SXG_MAX_SGL_BUFFER_SIZE \
SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize
// SAHARA SGL WORKAROUND!!
// The current Sahara card uses a 16-bit counter when advancing
// SGL address locations. This means that if an SGL crosses
// a 64k boundary, the hardware will actually skip back to
// the start of the previous 64k boundary, with obviously
// undesirable results.
//
// We currently workaround this issue by allocating SGL buffers
// in 64k blocks and skipping over buffers that straddle the boundary.
/*
* SAHARA SGL WORKAROUND!!
* The current Sahara card uses a 16-bit counter when advancing
* SGL address locations. This means that if an SGL crosses
* a 64k boundary, the hardware will actually skip back to
* the start of the previous 64k boundary, with obviously
* undesirable results.
*
* We currently workaround this issue by allocating SGL buffers
* in 64k blocks and skipping over buffers that straddle the boundary.
*/
#define SXG_INVALID_SGL(_SxgSgl) \
(((_SxgSgl)->PhysicalAddress.LowPart & 0xFFFF0000) != \
(((_SxgSgl)->PhysicalAddress.LowPart + \
SXG_SGL_SIZE((_SxgSgl)->Pool)) & 0xFFFF0000))
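As a worked illustration of the check above (the addresses are made up, not taken from the driver): an SGL whose buffer starts at physical address 0x1234FF00 and extends 0x200 bytes ends at 0x12350100, so the upper 16 address bits change from 0x1234 to 0x1235 and SXG_INVALID_SGL evaluates true; that buffer straddles a 64k boundary and must be skipped.

/* Illustrative only - the same test as SXG_INVALID_SGL, on raw numbers */
static inline bool example_straddles_64k(u32 low_part, u32 sgl_size)
{
	return (low_part & 0xFFFF0000) !=
	       ((low_part + sgl_size) & 0xFFFF0000);
}
/* example_straddles_64k(0x1234FF00, 0x200) -> true,  skip this SGL       */
/* example_straddles_64k(0x12340000, 0x200) -> false, safe to hand to HW  */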
// Allocate SGLs in blocks so we can skip over invalid entries.
// We allocate 64k worth of SGL buffers, including the
// struct sxg_sgl_block_hdr, plus one for padding
/*
* Allocate SGLs in blocks so we can skip over invalid entries.
 * We allocate 64k worth of SGL buffers, including the
* struct sxg_sgl_block_hdr, plus one for padding
*/
#define SXG_SGL_BLOCK_SIZE 65536
#define SXG_SGL_ALLOCATION_SIZE(_Pool) (SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool))
struct sxg_sgl_block_hdr {
ushort Pool; // Associated SGL pool
struct list_entry List; // SXG_SCATTER_GATHER blocks
dma64_addr_t PhysicalAddress;// physical address
ushort Pool; /* Associated SGL pool */
struct list_entry List; /* struct sxg_scatter_gather blocks */
dma64_addr_t PhysicalAddress;/* physical address */
};
// The following definition denotes the maximum block of memory that the
// card can DMA to. It is specified in the call to NdisMRegisterScatterGatherDma.
// For now, use the same value as used in the Slic/Oasis driver, which
// is 128M. That should cover any expected MDL that I can think of.
/*
* The following definition denotes the maximum block of memory that the
* card can DMA to. It is specified in the call to NdisMRegisterScatterGatherDma.
* For now, use the same value as used in the Slic/Oasis driver, which
* is 128M. That should cover any expected MDL that I can think of.
*/
#define SXG_MAX_PHYS_MAP (1024 * 1024 * 128)
// Self identifying structure type
/* Self identifying structure type */
enum SXG_SGL_TYPE {
SXG_SGL_DUMB, // Dumb NIC SGL
SXG_SGL_SLOW, // Slowpath protocol header - see below
SXG_SGL_CHIMNEY // Chimney offload SGL
SXG_SGL_DUMB, /* Dumb NIC SGL */
SXG_SGL_SLOW, /* Slowpath protocol header - see below */
SXG_SGL_CHIMNEY /* Chimney offload SGL */
};
// The ucode expects an NDIS SGL structure that
// is formatted for an x64 system. When running
// on an x64 system, we can simply hand the NDIS SGL
// to the card directly. For x86 systems we must reconstruct
// the SGL. The following structure defines an x64
// formatted SGL entry
/*
* The ucode expects an NDIS SGL structure that
* is formatted for an x64 system. When running
* on an x64 system, we can simply hand the NDIS SGL
* to the card directly. For x86 systems we must reconstruct
* the SGL. The following structure defines an x64
* formatted SGL entry
*/
struct sxg_x64_sge {
dma64_addr_t Address; // same as wdm.h
u32 Length; // same as wdm.h
u32 CompilerPad; // The compiler pads to 8-bytes
u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
dma64_addr_t Address; /* same as wdm.h */
u32 Length; /* same as wdm.h */
u32 CompilerPad; /* The compiler pads to 8-bytes */
u64 Reserved; /* u32 * in wdm.h. Force to 8 bytes */
};
// Our SGL structure - Essentially the same as
// wdm.h:SCATTER_GATHER_LIST. Note the variable number of
// elements based on the pool specified above
/*
* Our SGL structure - Essentially the same as
* wdm.h:SCATTER_GATHER_LIST. Note the variable number of
* elements based on the pool specified above
*/
struct sxg_x64_sgl {
u32 NumberOfElements;
u32 *Reserved;
struct sxg_x64_sge Elements[1]; // Variable
struct sxg_x64_sge Elements[1]; /* Variable */
};
struct sxg_scatter_gather {
enum SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
ushort Pool; // Associated SGL pool
ushort Entries; // SGL total entries
void *adapter; // Back pointer to adapter
struct list_entry FreeList; // Free SXG_SCATTER_GATHER blocks
struct list_entry AllList; // All SXG_SCATTER_GATHER blocks
dma_addr_t PhysicalAddress; // physical address
unsigned char State; // See SXG_BUFFER state above
unsigned char CmdIndex; // Command ring index
struct sk_buff *DumbPacket; // Associated Packet
u32 Direction; // For asynchronous completions
u32 CurOffset; // Current SGL offset
u32 SglRef; // SGL reference count
struct vlan_hdr VlanTag; // VLAN tag to be inserted into SGL
struct sxg_x64_sgl *pSgl; // SGL Addr. Possibly &Sgl
struct sxg_x64_sgl Sgl; // SGL handed to card
enum SXG_SGL_TYPE Type; /* FIRST! Dumb-nic or offload */
ushort Pool; /* Associated SGL pool */
ushort Entries; /* SGL total entries */
void *adapter; /* Back pointer to adapter */
struct list_entry FreeList; /* Free struct sxg_scatter_gather blocks */
struct list_entry AllList; /* All struct sxg_scatter_gather blocks */
dma_addr_t PhysicalAddress;/* physical address */
unsigned char State; /* See SXG_BUFFER state above */
unsigned char CmdIndex; /* Command ring index */
struct sk_buff *DumbPacket; /* Associated Packet */
u32 Direction; /* For asynchronous completions */
u32 CurOffset; /* Current SGL offset */
u32 SglRef; /* SGL reference count */
struct vlan_hdr VlanTag; /* VLAN tag to be inserted into SGL */
struct sxg_x64_sgl *pSgl; /* SGL Addr. Possibly &Sgl */
struct sxg_x64_sgl Sgl; /* SGL handed to card */
};
// Note - the "- 1" is because SXG_SCATTER_GATHER=>struct sxg_x64_sgl includes 1 SGE..
/* Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl includes 1 SGE.. */
#define SXG_SGL_SIZE(_Pool) \
(sizeof(struct sxg_scatter_gather) + \
((SxgSglPoolProperties[_Pool].SGEntries - 1) * \
@@ -919,7 +937,7 @@ struct sxg_scatter_gather {
#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * sizeof(struct sxg_x64_sge))
#define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl)
#elif defined(CONFIG_X86)
// Force NDIS to give us its own buffer so we can reformat to our own
/* Force NDIS to give us its own buffer so we can reformat to our own */
#define SXG_SGL_BUFFER(_SxgSgl) NULL
#define SXG_SGL_BUFFER_LENGTH(_SxgSgl) 0
#define SXG_SGL_BUF_SIZE 0
@@ -927,18 +945,16 @@ struct sxg_scatter_gather {
#error staging: sxg: driver is for X86 only!
#endif
/***************************************************************************
* Microcode statistics
***************************************************************************/
/* Microcode statistics */
struct sxg_ucode_stats {
u32 RPDQOflow; // PDQ overflow (unframed ie dq & drop 1st)
u32 XDrops; // Xmt drops due to no xmt buffer
u32 ERDrops; // Rcv drops due to ER full
u32 NBDrops; // Rcv drops due to out of host buffers
u32 PQDrops; // Rcv drops due to PDQ full
u32 BFDrops; // Rcv drops due to bad frame: no link addr match, frlen > max
u32 UPDrops; // Rcv drops due to UPFq full
u32 XNoBufs; // Xmt drop due to no DRAM Xmit buffer or PxyBuf
u32 RPDQOflow; /* PDQ overflow (unframed ie dq & drop 1st) */
u32 XDrops; /* Xmt drops due to no xmt buffer */
u32 ERDrops; /* Rcv drops due to ER full */
u32 NBDrops; /* Rcv drops due to out of host buffers */
u32 PQDrops; /* Rcv drops due to PDQ full */
u32 BFDrops; /* Rcv drops due to bad frame: no link addr match, frlen > max */
u32 UPDrops; /* Rcv drops due to UPFq full */
u32 XNoBufs; /* Xmt drop due to no DRAM Xmit buffer or PxyBuf */
};
/*
/*************************************************************
* Copyright 1997-2007 Alacritech, Inc. All rights reserved
*
* $Id: sxghw.h,v 1.2 2008/07/24 17:24:23 chris Exp $
@@ -7,709 +7,711 @@
*
* This file contains structures and definitions for the
* Alacritech Sahara hardware
*/
*
**********************************************************/
/*******************************************************************************
* PCI Configuration space
*******************************************************************************/
/* PCI Configuration space */
/* PCI Vendor ID */
#define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */
// PCI Device ID
/* PCI Device ID */
#define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */
//
// Subsystem IDs.
//
// The subsystem ID value is broken into bit fields as follows:
// Bits [15:12] - Function
// Bits [11:8] - OEM and/or operating system.
// Bits [7:0] - Base SID.
//
// SSID field (bit) masks
#define SSID_BASE_MASK 0x00FF // Base subsystem ID mask
#define SSID_OEM_MASK 0x0F00 // Subsystem OEM mask
#define SSID_FUNC_MASK 0xF000 // Subsystem function mask
// Base SSID's
#define SSID_SAHARA_PROTO 0x0018 // 100022 Sahara prototype (XenPak) board
#define SSID_SAHARA_FIBER 0x0019 // 100023 Sahara 1-port fiber board
#define SSID_SAHARA_COPPER 0x001A // 100024 Sahara 1-port copper board
// Useful SSID macros
#define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) // isolate base SSID bits
#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) // isolate SSID OEM bits
#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) // isolate SSID function bits
/*******************************************************************************
* HW Register Space
*******************************************************************************/
#define SXG_HWREG_MEMSIZE 0x4000 // 16k
/*
* Subsystem IDs.
*
* The subsystem ID value is broken into bit fields as follows:
* Bits [15:12] - Function
* Bits [11:8] - OEM and/or operating system.
* Bits [7:0] - Base SID.
*/
/* SSID field (bit) masks */
#define SSID_BASE_MASK 0x00FF /* Base subsystem ID mask */
#define SSID_OEM_MASK 0x0F00 /* Subsystem OEM mask */
#define SSID_FUNC_MASK 0xF000 /* Subsystem function mask */
/* Base SSID's */
#define SSID_SAHARA_PROTO 0x0018 /* 100022 Sahara prototype (XenPak) board */
#define SSID_SAHARA_FIBER 0x0019 /* 100023 Sahara 1-port fiber board */
#define SSID_SAHARA_COPPER 0x001A /* 100024 Sahara 1-port copper board */
/* Useful SSID macros */
#define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) /* isolate base SSID bits */
#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) /* isolate SSID OEM bits */
#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) /* isolate SSID function bits */
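A small, purely illustrative helper shows how the masks above decompose a subsystem ID; the sample value 0x101A is made up (base SSID 0x001A = Sahara 1-port copper, OEM field 0x0100, function field 0x1000).

/* Illustrative only - not part of the driver */
static inline bool example_is_sahara_copper(ushort ssid)
{
	return SSID_BASE(ssid) == SSID_SAHARA_COPPER;
}
/* example_is_sahara_copper(0x101A) -> true; the OEM and function bits are ignored */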
/* HW Register Space */
#define SXG_HWREG_MEMSIZE 0x4000 /* 16k */
#pragma pack(push, 1)
struct sxg_hw_regs {
u32 Reset; // Write 0xdead to invoke soft reset
u32 Pad1; // No register defined at offset 4
u32 InterruptMask0; // Deassert legacy interrupt on function 0
u32 InterruptMask1; // Deassert legacy interrupt on function 1
u32 UcodeDataLow; // Store microcode instruction bits 31-0
u32 UcodeDataMiddle; // Store microcode instruction bits 63-32
u32 UcodeDataHigh; // Store microcode instruction bits 95-64
u32 UcodeAddr; // Store microcode address - See flags below
u32 PadTo0x80[24]; // Pad to Xcv configuration registers
u32 MacConfig0; // 0x80 - AXGMAC Configuration Register 0
u32 MacConfig1; // 0x84 - AXGMAC Configuration Register 1
u32 MacConfig2; // 0x88 - AXGMAC Configuration Register 2
u32 MacConfig3; // 0x8C - AXGMAC Configuration Register 3
u32 MacAddressLow; // 0x90 - AXGMAC MAC Station Address - octets 1-4
u32 MacAddressHigh; // 0x94 - AXGMAC MAC Station Address - octets 5-6
u32 MacReserved1[2]; // 0x98 - AXGMAC Reserved
u32 MacMaxFrameLen; // 0xA0 - AXGMAC Maximum Frame Length
u32 MacReserved2[2]; // 0xA4 - AXGMAC Reserved
u32 MacRevision; // 0xAC - AXGMAC Revision Level Register
u32 MacReserved3[4]; // 0xB0 - AXGMAC Reserved
u32 MacAmiimCmd; // 0xC0 - AXGMAC AMIIM Command Register
u32 MacAmiimField; // 0xC4 - AXGMAC AMIIM Field Register
u32 MacAmiimConfig; // 0xC8 - AXGMAC AMIIM Configuration Register
u32 MacAmiimLink; // 0xCC - AXGMAC AMIIM Link Fail Vector Register
u32 MacAmiimIndicator; // 0xD0 - AXGMAC AMIIM Indicator Register
u32 PadTo0x100[11]; // 0xD4 - 0x100 - Pad
u32 XmtConfig; // 0x100 - Transmit Configuration Register
u32 RcvConfig; // 0x104 - Receive Configuration Register 1
u32 LinkAddress0Low; // 0x108 - Link address 0
u32 LinkAddress0High; // 0x10C - Link address 0
u32 LinkAddress1Low; // 0x110 - Link address 1
u32 LinkAddress1High; // 0x114 - Link address 1
u32 LinkAddress2Low; // 0x118 - Link address 2
u32 LinkAddress2High; // 0x11C - Link address 2
u32 LinkAddress3Low; // 0x120 - Link address 3
u32 LinkAddress3High; // 0x124 - Link address 3
u32 ToeplitzKey[10]; // 0x128 - 0x150 - Toeplitz key
u32 SocketKey[10]; // 0x150 - 0x178 - Socket Key
u32 LinkStatus; // 0x178 - Link status
u32 ClearStats; // 0x17C - Clear Stats
u32 XmtErrorsLow; // 0x180 - Transmit stats - errors
u32 XmtErrorsHigh; // 0x184 - Transmit stats - errors
u32 XmtFramesLow; // 0x188 - Transmit stats - frame count
u32 XmtFramesHigh; // 0x18C - Transmit stats - frame count
u32 XmtBytesLow; // 0x190 - Transmit stats - byte count
u32 XmtBytesHigh; // 0x194 - Transmit stats - byte count
u32 XmtTcpSegmentsLow; // 0x198 - Transmit stats - TCP segments
u32 XmtTcpSegmentsHigh; // 0x19C - Transmit stats - TCP segments
u32 XmtTcpBytesLow; // 0x1A0 - Transmit stats - TCP bytes
u32 XmtTcpBytesHigh; // 0x1A4 - Transmit stats - TCP bytes
u32 RcvErrorsLow; // 0x1A8 - Receive stats - errors
u32 RcvErrorsHigh; // 0x1AC - Receive stats - errors
u32 RcvFramesLow; // 0x1B0 - Receive stats - frame count
u32 RcvFramesHigh; // 0x1B4 - Receive stats - frame count
u32 RcvBytesLow; // 0x1B8 - Receive stats - byte count
u32 RcvBytesHigh; // 0x1BC - Receive stats - byte count
u32 RcvTcpSegmentsLow; // 0x1C0 - Receive stats - TCP segments
u32 RcvTcpSegmentsHigh; // 0x1C4 - Receive stats - TCP segments
u32 RcvTcpBytesLow; // 0x1C8 - Receive stats - TCP bytes
u32 RcvTcpBytesHigh; // 0x1CC - Receive stats - TCP bytes
u32 PadTo0x200[12]; // 0x1D0 - 0x200 - Pad
u32 Software[1920]; // 0x200 - 0x2000 - Software defined (not used)
u32 MsixTable[1024]; // 0x2000 - 0x3000 - MSIX Table
u32 MsixBitArray[1024]; // 0x3000 - 0x4000 - MSIX Pending Bit Array
u32 Reset; /* Write 0xdead to invoke soft reset */
u32 Pad1; /* No register defined at offset 4 */
u32 InterruptMask0; /* Deassert legacy interrupt on function 0 */
u32 InterruptMask1; /* Deassert legacy interrupt on function 1 */
u32 UcodeDataLow; /* Store microcode instruction bits 31-0 */
u32 UcodeDataMiddle; /* Store microcode instruction bits 63-32 */
u32 UcodeDataHigh; /* Store microcode instruction bits 95-64 */
u32 UcodeAddr; /* Store microcode address - See flags below */
u32 PadTo0x80[24]; /* Pad to Xcv configuration registers */
u32 MacConfig0; /* 0x80 - AXGMAC Configuration Register 0 */
u32 MacConfig1; /* 0x84 - AXGMAC Configuration Register 1 */
u32 MacConfig2; /* 0x88 - AXGMAC Configuration Register 2 */
u32 MacConfig3; /* 0x8C - AXGMAC Configuration Register 3 */
u32 MacAddressLow; /* 0x90 - AXGMAC MAC Station Address - octets 1-4 */
u32 MacAddressHigh; /* 0x94 - AXGMAC MAC Station Address - octets 5-6 */
u32 MacReserved1[2]; /* 0x98 - AXGMAC Reserved */
u32 MacMaxFrameLen; /* 0xA0 - AXGMAC Maximum Frame Length */
u32 MacReserved2[2]; /* 0xA4 - AXGMAC Reserved */
u32 MacRevision; /* 0xAC - AXGMAC Revision Level Register */
u32 MacReserved3[4]; /* 0xB0 - AXGMAC Reserved */
u32 MacAmiimCmd; /* 0xC0 - AXGMAC AMIIM Command Register */
u32 MacAmiimField; /* 0xC4 - AXGMAC AMIIM Field Register */
u32 MacAmiimConfig; /* 0xC8 - AXGMAC AMIIM Configuration Register */
u32 MacAmiimLink; /* 0xCC - AXGMAC AMIIM Link Fail Vector Register */
u32 MacAmiimIndicator; /* 0xD0 - AXGMAC AMIIM Indicator Register */
u32 PadTo0x100[11]; /* 0xD4 - 0x100 - Pad */
u32 XmtConfig; /* 0x100 - Transmit Configuration Register */
u32 RcvConfig; /* 0x104 - Receive Configuration Register 1 */
u32 LinkAddress0Low; /* 0x108 - Link address 0 */
u32 LinkAddress0High; /* 0x10C - Link address 0 */
u32 LinkAddress1Low; /* 0x110 - Link address 1 */
u32 LinkAddress1High; /* 0x114 - Link address 1 */
u32 LinkAddress2Low; /* 0x118 - Link address 2 */
u32 LinkAddress2High; /* 0x11C - Link address 2 */
u32 LinkAddress3Low; /* 0x120 - Link address 3 */
u32 LinkAddress3High; /* 0x124 - Link address 3 */
u32 ToeplitzKey[10]; /* 0x128 - 0x150 - Toeplitz key */
u32 SocketKey[10]; /* 0x150 - 0x178 - Socket Key */
u32 LinkStatus; /* 0x178 - Link status */
u32 ClearStats; /* 0x17C - Clear Stats */
u32 XmtErrorsLow; /* 0x180 - Transmit stats - errors */
u32 XmtErrorsHigh; /* 0x184 - Transmit stats - errors */
u32 XmtFramesLow; /* 0x188 - Transmit stats - frame count */
u32 XmtFramesHigh; /* 0x18C - Transmit stats - frame count */
u32 XmtBytesLow; /* 0x190 - Transmit stats - byte count */
u32 XmtBytesHigh; /* 0x194 - Transmit stats - byte count */
u32 XmtTcpSegmentsLow; /* 0x198 - Transmit stats - TCP segments */
u32 XmtTcpSegmentsHigh; /* 0x19C - Transmit stats - TCP segments */
u32 XmtTcpBytesLow; /* 0x1A0 - Transmit stats - TCP bytes */
u32 XmtTcpBytesHigh; /* 0x1A4 - Transmit stats - TCP bytes */
u32 RcvErrorsLow; /* 0x1A8 - Receive stats - errors */
u32 RcvErrorsHigh; /* 0x1AC - Receive stats - errors */
u32 RcvFramesLow; /* 0x1B0 - Receive stats - frame count */
u32 RcvFramesHigh; /* 0x1B4 - Receive stats - frame count */
u32 RcvBytesLow; /* 0x1B8 - Receive stats - byte count */
u32 RcvBytesHigh; /* 0x1BC - Receive stats - byte count */
u32 RcvTcpSegmentsLow; /* 0x1C0 - Receive stats - TCP segments */
u32 RcvTcpSegmentsHigh; /* 0x1C4 - Receive stats - TCP segments */
u32 RcvTcpBytesLow; /* 0x1C8 - Receive stats - TCP bytes */
u32 RcvTcpBytesHigh; /* 0x1CC - Receive stats - TCP bytes */
u32 PadTo0x200[12]; /* 0x1D0 - 0x200 - Pad */
u32 Software[1920]; /* 0x200 - 0x2000 - Software defined (not used) */
u32 MsixTable[1024]; /* 0x2000 - 0x3000 - MSIX Table */
u32 MsixBitArray[1024]; /* 0x3000 - 0x4000 - MSIX Pending Bit Array */
};
#pragma pack(pop)
// Microcode Address Flags
#define MICROCODE_ADDRESS_GO 0x80000000 // Start microcode
#define MICROCODE_ADDRESS_WRITE 0x40000000 // Store microcode
#define MICROCODE_ADDRESS_READ 0x20000000 // Read microcode
#define MICROCODE_ADDRESS_PARITY 0x10000000 // Parity error detected
#define MICROCODE_ADDRESS_MASK 0x00001FFF // Address bits
/* Microcode Address Flags */
#define MICROCODE_ADDRESS_GO 0x80000000 /* Start microcode */
#define MICROCODE_ADDRESS_WRITE 0x40000000 /* Store microcode */
#define MICROCODE_ADDRESS_READ 0x20000000 /* Read microcode */
#define MICROCODE_ADDRESS_PARITY 0x10000000 /* Parity error detected */
#define MICROCODE_ADDRESS_MASK 0x00001FFF /* Address bits */
// Link Address Registers
#define LINK_ADDRESS_ENABLE 0x80000000 // Applied to link address high
/* Link Address Registers */
#define LINK_ADDRESS_ENABLE 0x80000000 /* Applied to link address high */
// Microcode register space size
#define SXG_UCODEREG_MEMSIZE 0x40000 // 256k
/* Microcode register space size */
#define SXG_UCODEREG_MEMSIZE 0x40000 /* 256k */
// Sahara microcode register address format. The command code,
// extended command code, and associated processor are encoded in
// the address bits as follows
#define SXG_ADDRESS_CODE_SHIFT 2 // Base command code
/*
* Sahara microcode register address format. The command code,
* extended command code, and associated processor are encoded in
* the address bits as follows
*/
#define SXG_ADDRESS_CODE_SHIFT 2 /* Base command code */
#define SXG_ADDRESS_CODE_MASK 0x0000003C
#define SXG_ADDRESS_EXCODE_SHIFT 6 // Extended (or sub) command code
#define SXG_ADDRESS_EXCODE_SHIFT 6 /* Extended (or sub) command code */
#define SXG_ADDRESS_EXCODE_MASK 0x00001FC0
#define SXG_ADDRESS_CPUID_SHIFT 13 // CPU
#define SXG_ADDRESS_CPUID_SHIFT 13 /* CPU */
#define SXG_ADDRESS_CPUID_MASK 0x0003E000
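The shifts and masks above suggest how a per-CPU microcode register offset is composed; the helper below is only a sketch of that layout (the particular code/excode/cpuid values a caller would pass are not defined in this hunk).

/* Illustrative only - packs the three address fields described above */
static inline u32 example_ucode_reg_offset(u32 code, u32 excode, u32 cpuid)
{
	return ((code << SXG_ADDRESS_CODE_SHIFT) & SXG_ADDRESS_CODE_MASK) |
	       ((excode << SXG_ADDRESS_EXCODE_SHIFT) & SXG_ADDRESS_EXCODE_MASK) |
	       ((cpuid << SXG_ADDRESS_CPUID_SHIFT) & SXG_ADDRESS_CPUID_MASK);
}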
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure
// Sahara receive sequencer status values
#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask
#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error
#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error
#define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 // Transport underflow
#define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 // Transport header length
#define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 // Transport flags detected
#define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 // Transport options detected
#define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 // Transport DDP
#define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 // Transport DDP
#define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 // Transport iSCSI
#define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 // Transport NFS
#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP
#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP
#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB
#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error
#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error
#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length
#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected
#define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 // Network multicast detected
#define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 // Network options detected
#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected
#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected
#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask
#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow
#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask
#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
/***************************************************************************
* Sahara receive and transmit configuration registers
***************************************************************************/
#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level
#define RCV_CONFIG_CONN_MASK 0x000C0000 // Number of connections
#define RCV_CONFIG_CONN_4K 0x00000000 // 4k connections
#define RCV_CONFIG_CONN_2K 0x00040000 // 2k connections
#define RCV_CONFIG_CONN_1K 0x00080000 // 1k connections
#define RCV_CONFIG_CONN_512 0x000C0000 // 512 connections
#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth
#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16.
#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
// and round up to nearest 16 byte boundary
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 /* Used to sanity check UCODE_REGS structure */
/* Sahara receive sequencer status values */
#define SXG_RCV_STATUS_ATTN 0x80000000 /* Attention */
#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 /* Transport mask */
#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 /* Transport error */
#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 /* Transport cksum error */
#define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 /* Transport underflow */
#define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 /* Transport header length */
#define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 /* Transport flags detected */
#define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 /* Transport options detected */
#define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 /* Transport DDP */
#define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 /* Transport DDP */
#define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 /* Transport iSCSI */
#define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 /* Transport NFS */
#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 /* Transport FTP */
#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 /* Transport HTTP */
#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 /* Transport SMB */
#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 /* Network mask */
#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 /* Network error */
#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 /* Network cksum error */
#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 /* Network underflow error */
#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 /* Network header length */
#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 /* Network overflow detected */
#define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 /* Network multicast detected */
#define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 /* Network options detected */
#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 /* Network offset detected */
#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 /* Network fragment detected */
#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 /* Network transport type mask */
#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 /* UDP */
#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 /* TCP */
#define SXG_RCV_STATUS_IPONLY 0x00008000 /* IP-only not TCP */
#define SXG_RCV_STATUS_PKT_PRI 0x00006000 /* Receive priority */
#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 /* Receive priority shift */
#define SXG_RCV_STATUS_PARITY 0x00001000 /* MAC Receive RAM parity error */
#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 /* Link address detection mask */
#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 /* Link address D */
#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 /* Link address C */
#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 /* Link address B */
#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 /* Link address A */
#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 /* Link address broadcast */
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 /* Link address multicast */
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 /* Link control multicast */
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF /* Link status mask */
#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 /* Link error */
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF /* Link status mask */
#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 /* RcvMacQ parity error */
#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 /* Data early */
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 /* Buffer overflow */
#define SXG_RCV_STATUS_LINK_CODE 0x00000084 /* Link code error */
#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 /* Dribble nibble */
#define SXG_RCV_STATUS_LINK_CRC 0x00000082 /* CRC error */
#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 /* Link overflow */
#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 /* Link underflow */
#define SXG_RCV_STATUS_LINK_8023 0x00000020 /* 802.3 */
#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 /* Snap */
#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 /* VLAN */
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 /* Network type mask */
#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 /* Control packet */
#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 /* IPv6 packet */
#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 /* IPv4 packet */
/* Sahara receive and transmit configuration registers */
#define RCV_CONFIG_RESET 0x80000000 /* RcvConfig register reset */
#define RCV_CONFIG_ENABLE 0x40000000 /* Enable the receive logic */
#define RCV_CONFIG_ENPARSE 0x20000000 /* Enable the receive parser */
#define RCV_CONFIG_SOCKET 0x10000000 /* Enable the socket detector */
#define RCV_CONFIG_RCVBAD 0x08000000 /* Receive all bad frames */
#define RCV_CONFIG_CONTROL 0x04000000 /* Receive all control frames */
#define RCV_CONFIG_RCVPAUSE 0x02000000 /* Enable pause transmit when attn */
#define RCV_CONFIG_TZIPV6 0x01000000 /* Include TCP port w/ IPv6 toeplitz */
#define RCV_CONFIG_TZIPV4 0x00800000 /* Include TCP port w/ IPv4 toeplitz */
#define RCV_CONFIG_FLUSH 0x00400000 /* Flush buffers */
#define RCV_CONFIG_PRIORITY_MASK 0x00300000 /* Priority level */
#define RCV_CONFIG_CONN_MASK 0x000C0000 /* Number of connections */
#define RCV_CONFIG_CONN_4K 0x00000000 /* 4k connections */
#define RCV_CONFIG_CONN_2K 0x00040000 /* 2k connections */
#define RCV_CONFIG_CONN_1K 0x00080000 /* 1k connections */
#define RCV_CONFIG_CONN_512 0x000C0000 /* 512 connections */
#define RCV_CONFIG_HASH_MASK 0x00030000 /* Hash depth */
#define RCV_CONFIG_HASH_8 0x00000000 /* Hash depth 8 */
#define RCV_CONFIG_HASH_16 0x00010000 /* Hash depth 16 */
#define RCV_CONFIG_HASH_4 0x00020000 /* Hash depth 4 */
#define RCV_CONFIG_HASH_2 0x00030000 /* Hash depth 2 */
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 /* Buffer length bits 15:4. ie multiple of 16. */
#define RCV_CONFIG_SKT_DIS 0x00000008 /* Disable socket detection on attn */
/*
* Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
* We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
* and round up to nearest 16 byte boundary
*/
#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
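As a worked example (the 1518-byte standard maximum frame is an assumption, not a value from this file): 1518 + 22 = 1540, 1540 + 15 = 1555 = 0x613, and 0x613 & 0xFFF0 = 0x610 = 1552, the next multiple of 16.

/* Illustrative only: RCV_CONFIG_BUFSIZE(1518) evaluates to 1552 (0x610) */
#define EXAMPLE_RCV_BUFSIZE_STD RCV_CONFIG_BUFSIZE(1518)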
#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error
#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error
#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error
#define XMT_CONFIG_INVERT_PARITY 0x04000000 // Invert MAC RAM parity
#define XMT_CONFIG_INITIAL_IPID 0x0000FFFF // Initial IPID
#define XMT_CONFIG_RESET 0x80000000 /* XmtConfig register reset */
#define XMT_CONFIG_ENABLE 0x40000000 /* Enable transmit logic */
#define XMT_CONFIG_MAC_PARITY 0x20000000 /* Inhibit MAC RAM parity error */
#define XMT_CONFIG_BUF_PARITY 0x10000000 /* Inhibit D2F buffer parity error */
#define XMT_CONFIG_MEM_PARITY 0x08000000 /* Inhibit 1T SRAM parity error */
#define XMT_CONFIG_INVERT_PARITY 0x04000000 /* Invert MAC RAM parity */
#define XMT_CONFIG_INITIAL_IPID 0x0000FFFF /* Initial IPID */
/***************************************************************************
/*
* A-XGMAC Registers - Occupy 0x80 - 0xD4 of the struct sxg_hw_regs
*
* Full register descriptions can be found in axgmac.pdf
***************************************************************************/
// A-XGMAC Configuration Register 0
#define AXGMAC_CFG0_SUB_RESET 0x80000000 // Sub module reset
#define AXGMAC_CFG0_RCNTRL_RESET 0x00400000 // Receive control reset
#define AXGMAC_CFG0_RFUNC_RESET 0x00200000 // Receive function reset
#define AXGMAC_CFG0_TCNTRL_RESET 0x00040000 // Transmit control reset
#define AXGMAC_CFG0_TFUNC_RESET 0x00020000 // Transmit function reset
#define AXGMAC_CFG0_MII_RESET 0x00010000 // MII Management reset
// A-XGMAC Configuration Register 1
#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames
#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames
#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY
#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY
#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF
#define AXGMAC_CFG1_XMG_FCS1 0x00000400 // Delay transmit FCS 1 4-byte word
#define AXGMAC_CFG1_XMG_FCS2 0x00000800 // Delay transmit FCS 2 4-byte words
#define AXGMAC_CFG1_XMG_FCS3 0x00000C00 // Delay transmit FCS 3 4-byte words
#define AXGMAC_CFG1_RCV_FCS1 0x00000100 // Delay receive FCS 1 4-byte word
#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words
#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable
#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64)
#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length
#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits
#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes
#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
// A-XGMAC Configuration Register 2
#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test)
#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence
#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY)
#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY)
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap
*/
/* A-XGMAC Configuration Register 0 */
#define AXGMAC_CFG0_SUB_RESET 0x80000000 /* Sub module reset */
#define AXGMAC_CFG0_RCNTRL_RESET 0x00400000 /* Receive control reset */
#define AXGMAC_CFG0_RFUNC_RESET 0x00200000 /* Receive function reset */
#define AXGMAC_CFG0_TCNTRL_RESET 0x00040000 /* Transmit control reset */
#define AXGMAC_CFG0_TFUNC_RESET 0x00020000 /* Transmit function reset */
#define AXGMAC_CFG0_MII_RESET 0x00010000 /* MII Management reset */
/* A-XGMAC Configuration Register 1 */
#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 /* Allow the sending of Pause frames */
#define AXGMAC_CFG1_XMT_EN 0x40000000 /* Enable transmit */
#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 /* Allow the detection of Pause frames */
#define AXGMAC_CFG1_RCV_EN 0x10000000 /* Enable receive */
#define AXGMAC_CFG1_XMT_STATE 0x04000000 /* Current transmit state - READ ONLY */
#define AXGMAC_CFG1_RCV_STATE 0x01000000 /* Current receive state - READ ONLY */
#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 /* Only pause for 64 slot on XOFF */
#define AXGMAC_CFG1_XMG_FCS1 0x00000400 /* Delay transmit FCS 1 4-byte word */
#define AXGMAC_CFG1_XMG_FCS2 0x00000800 /* Delay transmit FCS 2 4-byte words */
#define AXGMAC_CFG1_XMG_FCS3 0x00000C00 /* Delay transmit FCS 3 4-byte words */
#define AXGMAC_CFG1_RCV_FCS1 0x00000100 /* Delay receive FCS 1 4-byte word */
#define AXGMAC_CFG1_RCV_FCS2 0x00000200 /* Delay receive FCS 2 4-byte words */
#define AXGMAC_CFG1_RCV_FCS3 0x00000300 /* Delay receive FCS 3 4-byte words */
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 /* Per-packet override enable */
#define AXGMAC_CFG1_SWAP 0x00000040 /* Byte swap enable */
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 /* ASSERT srdrpfrm on short frame (<64) */
#define AXGMAC_CFG1_RCV_STRICT 0x00000010 /* RCV only 802.3AE when CLEAR */
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 /* Verify frame length */
#define AXGMAC_CFG1_GEN_FCS 0x00000004 /* Generate FCS */
#define AXGMAC_CFG1_PAD_MASK 0x00000003 /* Mask for pad bits */
#define AXGMAC_CFG1_PAD_64 0x00000001 /* Pad frames to 64 bytes */
#define AXGMAC_CFG1_PAD_VLAN 0x00000002 /* Detect VLAN and pad to 68 bytes */
#define AXGMAC_CFG1_PAD_68 0x00000003 /* Pad to 68 bytes */
/* A-XGMAC Configuration Register 2 */
#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 /* Generate single pause frame (test) */
#define AXGMAC_CFG2_LF_MANUAL 0x08000000 /* Manual link fault sequence */
#define AXGMAC_CFG2_LF_AUTO 0x04000000 /* Auto link fault sequence */
#define AXGMAC_CFG2_LF_REMOTE 0x02000000 /* Remote link fault (READ ONLY) */
#define AXGMAC_CFG2_LF_LOCAL 0x01000000 /* Local link fault (READ ONLY) */
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 /* Inter packet gap */
#define AXGMAC_CFG2_IPG_SHIFT 16
#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module
#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension
// A-XGMAC Configuration Register 3
#define AXGMAC_CFG3_RCV_DROP 0xFFFF0000 // Receive frame drop filter
#define AXGMAC_CFG3_RCV_DONT_CARE 0x0000FFFF // Receive frame don't care filter
// A-XGMAC Station Address Register - Octets 1-4
#define AXGMAC_SARLOW_OCTET_ONE 0xFF000000 // First octet
#define AXGMAC_SARLOW_OCTET_TWO 0x00FF0000 // Second octet
#define AXGMAC_SARLOW_OCTET_THREE 0x0000FF00 // Third octet
#define AXGMAC_SARLOW_OCTET_FOUR 0x000000FF // Fourth octet
// A-XGMAC Station Address Register - Octets 5-6
#define AXGMAC_SARHIGH_OCTET_FIVE 0xFF000000 // Fifth octet
#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet
// A-XGMAC Maximum frame length register
#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 /* Pause transmit module */
#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 /* Enable IPG extension algorithm */
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F /* IPG extension */
/* A-XGMAC Configuration Register 3 */
#define AXGMAC_CFG3_RCV_DROP 0xFFFF0000 /* Receive frame drop filter */
#define AXGMAC_CFG3_RCV_DONT_CARE 0x0000FFFF /* Receive frame don't care filter */
/* A-XGMAC Station Address Register - Octets 1-4 */
#define AXGMAC_SARLOW_OCTET_ONE 0xFF000000 /* First octet */
#define AXGMAC_SARLOW_OCTET_TWO 0x00FF0000 /* Second octet */
#define AXGMAC_SARLOW_OCTET_THREE 0x0000FF00 /* Third octet */
#define AXGMAC_SARLOW_OCTET_FOUR 0x000000FF /* Fourth octet */
/* A-XGMAC Station Address Register - Octets 5-6 */
#define AXGMAC_SARHIGH_OCTET_FIVE 0xFF000000 /* Fifth octet */
#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 /* Sixth octet */
/* A-XGMAC Maximum frame length register */
#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 /* Maximum transmit frame length */
#define AXGMAC_MAXFRAME_XMT_SHIFT 16
#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
// This register doesn't need to be written for standard MTU.
// For jumbo, I'll just statically define the value here. This
// value sets the receive byte count to 9036 (0x234C) and the
// transmit WORD count to 2259 (0x8D3). These values include 22
// bytes of padding beyond the jumbo MTU of 9014
#define AXGMAC_MAXFRAME_RCV 0x0000FFFF /* Maximum receive frame length */
/*
* This register doesn't need to be written for standard MTU.
* For jumbo, I'll just statically define the value here. This
* value sets the receive byte count to 9036 (0x234C) and the
* transmit WORD count to 2259 (0x8D3). These values include 22
* bytes of padding beyond the jumbo MTU of 9014
*/
#define AXGMAC_MAXFRAME_JUMBO 0x08D3234C
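The jumbo value can be checked against the masks defined just above; the two defines below are purely illustrative and recover the byte and word counts quoted in the comment.

/* Illustrative only - decompose AXGMAC_MAXFRAME_JUMBO with the masks above */
#define EXAMPLE_JUMBO_RCV_BYTES (AXGMAC_MAXFRAME_JUMBO & AXGMAC_MAXFRAME_RCV)	/* 0x234C = 9036 bytes */
#define EXAMPLE_JUMBO_XMT_WORDS ((AXGMAC_MAXFRAME_JUMBO & AXGMAC_MAXFRAME_XMT) >> \
				 AXGMAC_MAXFRAME_XMT_SHIFT)			/* 0x08D3 = 2259 words = 9036 bytes */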
// A-XGMAC Revision level
#define AXGMAC_REVISION_MASK 0x0000FFFF // Revision level
// A-XGMAC AMIIM Command Register
#define AXGMAC_AMIIM_CMD_START 0x00000008 // Command start
#define AXGMAC_AMIIM_CMD_MASK 0x00000007 // Command
#define AXGMAC_AMIIM_CMD_LEGACY_WRITE 1 // 10/100/1000 Mbps Phy Write
#define AXGMAC_AMIIM_CMD_LEGACY_READ 2 // 10/100/1000 Mbps Phy Read
#define AXGMAC_AMIIM_CMD_MONITOR_SINGLE 3 // Monitor single PHY
#define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE 4 // Monitor multiple contiguous PHYs
#define AXGMAC_AMIIM_CMD_10G_OPERATION 5 // Present AMIIM Field Reg
#define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL 6 // Clear Link Fail Bit in MIIM
// A-XGMAC AMIIM Field Register
#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field
/* A-XGMAC Revision level */
#define AXGMAC_REVISION_MASK 0x0000FFFF /* Revision level */
/* A-XGMAC AMIIM Command Register */
#define AXGMAC_AMIIM_CMD_START 0x00000008 /* Command start */
#define AXGMAC_AMIIM_CMD_MASK 0x00000007 /* Command */
#define AXGMAC_AMIIM_CMD_LEGACY_WRITE 1 /* 10/100/1000 Mbps Phy Write */
#define AXGMAC_AMIIM_CMD_LEGACY_READ 2 /* 10/100/1000 Mbps Phy Read */
#define AXGMAC_AMIIM_CMD_MONITOR_SINGLE 3 /* Monitor single PHY */
#define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE 4 /* Monitor multiple contiguous PHYs */
#define AXGMAC_AMIIM_CMD_10G_OPERATION 5 /* Present AMIIM Field Reg */
#define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL 6 /* Clear Link Fail Bit in MIIM */
/* A-XGMAC AMIIM Field Register */
#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 /* 2-bit ST field */
#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field
#define AXGMAC_AMIIM_FIELD_OP 0x30000000 /* 2-bit OP field */
#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 /* Port address field (hstphyadx in spec) */
#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec)
#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 /* Device address field (hstregadx in spec) */
#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field
#define AXGMAC_AMIIM_FIELD_TA 0x00030000 /* 2-bit TA field */
#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field
// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
#define MIIM_OP_ADDR 0 // MIIM Address set operation
#define MIIM_OP_WRITE 1 // MIIM Write register operation
#define MIIM_OP_READ 2 // MIIM Read register operation
/* Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register */
#define MIIM_OP_ADDR 0 /* MIIM Address set operation */
#define MIIM_OP_WRITE 1 /* MIIM Write register operation */
#define MIIM_OP_READ 2 /* MIIM Read register operation */
#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
// A-XGMAC AMIIM Configuration Register
#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame
#define AXGMAC_AMIIM_CFG_HALF_CLOCK 0x0000007F // half-clock duration of MDC output
// A-XGMAC AMIIM Indicator Register
#define AXGMAC_AMIIM_INDC_LINK 0x00000010 // Link status from legacy PHY or MMD
#define AXGMAC_AMIIM_INDC_MPHY 0x00000008 // Multiple phy operation in progress
#define AXGMAC_AMIIM_INDC_SPHY 0x00000004 // Single phy operation in progress
#define AXGMAC_AMIIM_INDC_MON 0x00000002 // Single or multiple monitor cmd
#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete
// Link Status and Control Register
#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes
#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
#define LS_LINK_ALARM 0x00000004 // Link alarm pin
#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change
#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
// Link Address High Registers
#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
/***************************************************************************
/* Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register */
#define MIIM_PORT_NUM 1 /* All Sahara MIIM modules use port 1 */
/* Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register */
#define MIIM_DEV_PHY_PMA 1 /* PHY PMA/PMD module MIIM device number */
#define MIIM_DEV_PHY_PCS 3 /* PHY PCS module MIIM device number */
#define MIIM_DEV_PHY_XS 4 /* PHY XS module MIIM device number */
#define MIIM_DEV_XGXS 5 /* XGXS MIIM device number */
/* Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register */
#define MIIM_TA_10GB 2 /* set to 2 for 10 GB operation */
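A sketch of how these fields combine into an AMIIM Field Register value, assuming the usual address-then-read MDIO sequence; only the shifts and constants come from this file, the helper and its sequencing are illustrative.

/* Illustrative only - pack an AMIIM Field Register value (ST field left 0) */
static inline u32 example_amiim_field(u32 op, u32 dev_addr, u32 data)
{
	return (op << AXGMAC_AMIIM_FIELD_OP_SHIFT) |
	       (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	       (dev_addr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	       (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	       (data & AXGMAC_AMIIM_FIELD_DATA);
}
/* e.g. example_amiim_field(MIIM_OP_ADDR, MIIM_DEV_PHY_XS, 0x0018) would load
 * the XS Lane Status register address before issuing a MIIM_OP_READ */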
/* A-XGMAC AMIIM Configuration Register */
#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 /* Bypass preamble of mngmt frame */
#define AXGMAC_AMIIM_CFG_HALF_CLOCK 0x0000007F /* half-clock duration of MDC output */
/* A-XGMAC AMIIM Indicator Register */
#define AXGMAC_AMIIM_INDC_LINK 0x00000010 /* Link status from legacy PHY or MMD */
#define AXGMAC_AMIIM_INDC_MPHY 0x00000008 /* Multiple phy operation in progress */
#define AXGMAC_AMIIM_INDC_SPHY 0x00000004 /* Single phy operation in progress */
#define AXGMAC_AMIIM_INDC_MON 0x00000002 /* Single or multiple monitor cmd */
#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 /* Set until cmd operation complete */
/* Link Status and Control Register */
#define LS_PHY_CLR_RESET 0x80000000 /* Clear reset signal to PHY */
#define LS_SERDES_POWER_DOWN 0x40000000 /* Power down the Sahara Serdes */
#define LS_XGXS_ENABLE 0x20000000 /* Enable the XAUI XGXS logic */
#define LS_XGXS_CTL 0x10000000 /* Hold XAUI XGXS logic reset until Serdes is up */
#define LS_SERDES_DOWN 0x08000000 /* When 0, XAUI Serdes is up and initialization is complete */
#define LS_TRACE_DOWN 0x04000000 /* When 0, Trace Serdes is up and initialization is complete */
#define LS_PHY_CLK_25MHZ 0x02000000 /* Set PHY clock to 25 MHz (else 156.125 MHz) */
#define LS_PHY_CLK_EN 0x01000000 /* Enable clock to PHY */
#define LS_XAUI_LINK_UP 0x00000010 /* XAUI link is up */
#define LS_XAUI_LINK_CHNG 0x00000008 /* XAUI link status has changed */
#define LS_LINK_ALARM 0x00000004 /* Link alarm pin */
#define LS_ATTN_CTRL_MASK 0x00000003 /* Mask link attention control bits */
#define LS_ATTN_ALARM 0x00000000 /* 00 => Attn on link alarm */
#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 /* 01 => Attn on link alarm or status change */
#define LS_ATTN_STAT_CHNG 0x00000002 /* 10 => Attn on link status change */
#define LS_ATTN_NONE 0x00000003 /* 11 => no Attn */
/* Link Address High Registers */
#define LINK_ADDR_ENABLE 0x80000000 /* Enable this link address */
/*
* XGXS XAUI XGMII Extender registers
*
* Full register descriptions can be found in mxgxs.pdf
***************************************************************************/
// XGXS Register Map
#define XGXS_ADDRESS_CONTROL1 0x0000 // XS Control 1
#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1
#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low)
#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high)
#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package
#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package
#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2
#define XGXS_ADDRESS_PKGID_lOW 0x000E // XS Package Identifier
#define XGXS_ADDRESS_PKGID_HIGH 0x000F // XS Package Identifier
#define XGXS_ADDRESS_LANE_STATUS 0x0018 // 10G XGXS Lane Status
#define XGXS_ADDRESS_TEST_CTRL 0x0019 // 10G XGXS Test Control
#define XGXS_ADDRESS_RESET_LO1 0x8000 // Vendor-Specific Reset Lo 1
#define XGXS_ADDRESS_RESET_LO2 0x8001 // Vendor-Specific Reset Lo 2
#define XGXS_ADDRESS_RESET_HI1 0x8002 // Vendor-Specific Reset Hi 1
#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2
// XS Control 1 register bit definitions
#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback
#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+
#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode
#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?)
#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
// XS Status 1 register bit definitions
#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported
// XS Speed register bit definitions
#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
// XS Devices register bit definitions
#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
#define XGXS_DEVICES_PCS 0x0008 // PCS Present
#define XGXS_DEVICES_WIS 0x0004 // WIS Present
#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present
// XS Devices High register bit definitions
#define XGXS_DEVICES_VENDOR2 0x8000 // Vendor specific device 2
#define XGXS_DEVICES_VENDOR1 0x4000 // Vendor specific device 1
// XS Status 2 register bit definitions
#define XGXS_STATUS2_DEV_MASK 0xC000 // Device present mask
#define XGXS_STATUS2_DEV_RESPOND 0x8000 // Device responding
#define XGXS_STATUS2_XMT_FAULT 0x0800 // Transmit fault
#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault
// XS Package ID High register bit definitions
#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
// XS Lane Status register bit definitions
#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
// XS Test Control register bit definitions
#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled
#define XGXS_TEST_PATTERN_MASK 0x0003 // Test patterns
#define XGXS_TEST_PATTERN_RSVD 0x0003 // Test pattern - reserved
#define XGXS_TEST_PATTERN_MIX 0x0002 // Test pattern - mixed
#define XGXS_TEST_PATTERN_LOW 0x0001 // Test pattern - low
#define XGXS_TEST_PATTERN_HIGH 0x0001 // Test pattern - high
/***************************************************************************
*/
/* XGXS Register Map */
#define XGXS_ADDRESS_CONTROL1 0x0000 /* XS Control 1 */
#define XGXS_ADDRESS_STATUS1 0x0001 /* XS Status 1 */
#define XGXS_ADDRESS_DEVID_LOW 0x0002 /* XS Device ID (low) */
#define XGXS_ADDRESS_DEVID_HIGH 0x0003 /* XS Device ID (high) */
#define XGXS_ADDRESS_SPEED 0x0004 /* XS Speed ability */
#define XGXS_ADDRESS_DEV_LOW 0x0005 /* XS Devices in package */
#define XGXS_ADDRESS_DEV_HIGH 0x0006 /* XS Devices in package */
#define XGXS_ADDRESS_STATUS2 0x0008 /* XS Status 2 */
#define XGXS_ADDRESS_PKGID_lOW 0x000E /* XS Package Identifier */
#define XGXS_ADDRESS_PKGID_HIGH 0x000F /* XS Package Identifier */
#define XGXS_ADDRESS_LANE_STATUS 0x0018 /* 10G XGXS Lane Status */
#define XGXS_ADDRESS_TEST_CTRL 0x0019 /* 10G XGXS Test Control */
#define XGXS_ADDRESS_RESET_LO1 0x8000 /* Vendor-Specific Reset Lo 1 */
#define XGXS_ADDRESS_RESET_LO2 0x8001 /* Vendor-Specific Reset Lo 2 */
#define XGXS_ADDRESS_RESET_HI1 0x8002 /* Vendor-Specific Reset Hi 1 */
#define XGXS_ADDRESS_RESET_HI2 0x8003 /* Vendor-Specific Reset Hi 2 */
/* XS Control 1 register bit definitions */
#define XGXS_CONTROL1_RESET 0x8000 /* Reset - self clearing */
#define XGXS_CONTROL1_LOOPBACK 0x4000 /* Enable loopback */
#define XGXS_CONTROL1_SPEED1 0x2000 /* 0 = unspecified, 1 = 10Gb+ */
#define XGXS_CONTROL1_LOWPOWER 0x0400 /* 1 = Low power mode */
#define XGXS_CONTROL1_SPEED2 0x0040 /* Same as SPEED1 (?) */
#define XGXS_CONTROL1_SPEED 0x003C /* Everything reserved except zero (?) */
/* XS Status 1 register bit definitions */
#define XGXS_STATUS1_FAULT 0x0080 /* Fault detected */
#define XGXS_STATUS1_LINK 0x0004 /* 1 = Link up */
#define XGXS_STATUS1_LOWPOWER 0x0002 /* 1 = Low power supported */
/* XS Speed register bit definitions */
#define XGXS_SPEED_10G 0x0001 /* 1 = 10G capable */
/* XS Devices register bit definitions */
#define XGXS_DEVICES_DTE 0x0020 /* DTE XS Present */
#define XGXS_DEVICES_PHY 0x0010 /* PHY XS Present */
#define XGXS_DEVICES_PCS 0x0008 /* PCS Present */
#define XGXS_DEVICES_WIS 0x0004 /* WIS Present */
#define XGXS_DEVICES_PMD 0x0002 /* PMD/PMA Present */
#define XGXS_DEVICES_CLAUSE22 0x0001 /* Clause 22 registers present */
/* XS Devices High register bit definitions */
#define XGXS_DEVICES_VENDOR2 0x8000 /* Vendor specific device 2 */
#define XGXS_DEVICES_VENDOR1 0x4000 /* Vendor specific device 1 */
/* XS Status 2 register bit definitions */
#define XGXS_STATUS2_DEV_MASK 0xC000 /* Device present mask */
#define XGXS_STATUS2_DEV_RESPOND 0x8000 /* Device responding */
#define XGXS_STATUS2_XMT_FAULT 0x0800 /* Transmit fault */
#define XGXS_STATUS2_RCV_FAULT 0x0400 /* Receive fault */
/* XS Package ID High register bit definitions */
#define XGXS_PKGID_HIGH_ORG 0xFC00 /* Organizationally Unique */
#define XGXS_PKGID_HIGH_MFG 0x03F0 /* Manufacturer Model */
#define XGXS_PKGID_HIGH_REV 0x000F /* Revision Number */
/* XS Lane Status register bit definitions */
#define XGXS_LANE_PHY 0x1000 /* PHY/DTE lane alignment status */
#define XGXS_LANE_PATTERN 0x0800 /* Pattern testing ability */
#define XGXS_LANE_LOOPBACK 0x0400 /* PHY loopback ability */
#define XGXS_LANE_SYNC3 0x0008 /* Lane 3 sync */
#define XGXS_LANE_SYNC2 0x0004 /* Lane 2 sync */
#define XGXS_LANE_SYNC1 0x0002 /* Lane 1 sync */
#define XGXS_LANE_SYNC0 0x0001 /* Lane 0 sync */
/* XS Test Control register bit definitions */
#define XGXS_TEST_PATTERN_ENABLE 0x0004 /* Test pattern enabled */
#define XGXS_TEST_PATTERN_MASK 0x0003 /* Test patterns */
#define XGXS_TEST_PATTERN_RSVD 0x0003 /* Test pattern - reserved */
#define XGXS_TEST_PATTERN_MIX 0x0002 /* Test pattern - mixed */
#define XGXS_TEST_PATTERN_LOW 0x0001 /* Test pattern - low */
#define XGXS_TEST_PATTERN_HIGH 0x0001 /* Test pattern - high */
/*
* External MDIO Bus Registers
*
* Full register descriptions can be found in PHY/XENPAK/IEEE specs
***************************************************************************/
// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control
#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control
#define LASI_CONTROL 0x9002 // LASI Control
#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status
#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status
#define LASI_STATUS 0x9005 // LASI Status
// LASI_CONTROL bit definitions
#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts
#define LASI_CTL_TX_ALARM_ENABLE 0x0002 // Enable TX_ALARM interrupts
#define LASI_CTL_LS_ALARM_ENABLE 0x0001 // Enable Link Status interrupts
// LASI_STATUS bit definitions
#define LASI_STATUS_RX_ALARM 0x0004 // RX_ALARM status
#define LASI_STATUS_TX_ALARM 0x0002 // TX_ALARM status
#define LASI_STATUS_LS_ALARM 0x0001 // Link Status
// PHY registers - PMA/PMD (device 1)
#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
// other PMA/PMD registers exist and can be defined as needed
// PHY registers - PCS (device 3)
#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
// other PCS registers exist and can be defined as needed
// PHY registers - XS (device 4)
#define PHY_XS_CONTROL1 0x0000 // XS Control 1
#define PHY_XS_STATUS1 0x0001 // XS Status 1
#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
// other XS registers exist and can be defined as needed
// PHY_PMA_CONTROL1 register bit definitions
#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
// PHY_PMA_RCV_DET register bit definitions
#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
// PHY_PCS_10G_STATUS1 register bit definitions
#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
// PHY_XS_LANE_STATUS register bit definitions
#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
// PHY Microcode download data structure
*/
/* LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device) */
#define LASI_RX_ALARM_CONTROL 0x9000 /* LASI RX_ALARM Control */
#define LASI_TX_ALARM_CONTROL 0x9001 /* LASI TX_ALARM Control */
#define LASI_CONTROL 0x9002 /* LASI Control */
#define LASI_RX_ALARM_STATUS 0x9003 /* LASI RX_ALARM Status */
#define LASI_TX_ALARM_STATUS 0x9004 /* LASI TX_ALARM Status */
#define LASI_STATUS 0x9005 /* LASI Status */
/* LASI_CONTROL bit definitions */
#define LASI_CTL_RX_ALARM_ENABLE 0x0004 /* Enable RX_ALARM interrupts */
#define LASI_CTL_TX_ALARM_ENABLE 0x0002 /* Enable TX_ALARM interrupts */
#define LASI_CTL_LS_ALARM_ENABLE 0x0001 /* Enable Link Status interrupts */
/* LASI_STATUS bit definitions */
#define LASI_STATUS_RX_ALARM 0x0004 /* RX_ALARM status */
#define LASI_STATUS_TX_ALARM 0x0002 /* TX_ALARM status */
#define LASI_STATUS_LS_ALARM 0x0001 /* Link Status */
/* PHY registers - PMA/PMD (device 1) */
#define PHY_PMA_CONTROL1 0x0000 /* PMA/PMD Control 1 */
#define PHY_PMA_STATUS1 0x0001 /* PMA/PMD Status 1 */
#define PHY_PMA_RCV_DET 0x000A /* PMA/PMD Receive Signal Detect */
/* other PMA/PMD registers exist and can be defined as needed */
/* PHY registers - PCS (device 3) */
#define PHY_PCS_CONTROL1 0x0000 /* PCS Control 1 */
#define PHY_PCS_STATUS1 0x0001 /* PCS Status 1 */
#define PHY_PCS_10G_STATUS1 0x0020 /* PCS 10GBASE-R Status 1 */
/* other PCS registers exist and can be defined as needed */
/* PHY registers - XS (device 4) */
#define PHY_XS_CONTROL1 0x0000 /* XS Control 1 */
#define PHY_XS_STATUS1 0x0001 /* XS Status 1 */
#define PHY_XS_LANE_STATUS 0x0018 /* XS Lane Status */
/* other XS registers exist and can be defined as needed */
/* PHY_PMA_CONTROL1 register bit definitions */
#define PMA_CONTROL1_RESET 0x8000 /* PMA/PMD reset */
/* PHY_PMA_RCV_DET register bit definitions */
#define PMA_RCV_DETECT 0x0001 /* PMA/PMD receive signal detect */
/* PHY_PCS_10G_STATUS1 register bit definitions */
#define PCS_10B_BLOCK_LOCK 0x0001 /* PCS 10GBASE-R locked to receive blocks */
/* PHY_XS_LANE_STATUS register bit definitions */
#define XS_LANE_ALIGN 0x1000 /* XS transmit lanes aligned */
/* PHY Microcode download data structure */
struct phy_ucode {
ushort Addr;
ushort Data;
};
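/*
 * Illustrative sketch, not part of the original header: one way a table of
 * struct phy_ucode Addr/Data pairs might be written to the PHY during
 * bring-up, followed by enabling LASI link-status interrupts using the
 * registers defined above.  The sxg_mdio_write() helper and its argument
 * order are assumptions made for this example, and MIIM_DEV_PHY_PMA is
 * assumed to be defined elsewhere in this header (it is only referenced in
 * the comments here).
 */
extern int sxg_mdio_write(struct adapter_t *adapter, u32 dev,
			  u32 reg, u32 value);	/* hypothetical helper */

static void example_phy_setup(struct adapter_t *adapter,
			      const struct phy_ucode *ucode, int entries)
{
	int i;

	/* Push each Addr/Data pair from the download table to the PHY */
	for (i = 0; i < entries; i++)
		sxg_mdio_write(adapter, MIIM_DEV_PHY_PMA,
			       ucode[i].Addr, ucode[i].Data);

	/* Ask the PHY to raise LASI interrupts on link-status changes */
	sxg_mdio_write(adapter, MIIM_DEV_PHY_PMA,
		       LASI_CONTROL, LASI_CTL_LS_ALARM_ENABLE);
}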
/* Slow Bus Register Definitions */
/*****************************************************************************
* Slow Bus Register Definitions
*****************************************************************************/
// Module 0 registers
#define GPIO_L_IN 0x15 // GPIO input (low)
#define GPIO_L_OUT 0x16 // GPIO output (low)
#define GPIO_L_DIR 0x17 // GPIO direction (low)
#define GPIO_H_IN 0x19 // GPIO input (high)
#define GPIO_H_OUT 0x1A // GPIO output (high)
#define GPIO_H_DIR 0x1B // GPIO direction (high)
/* Module 0 registers */
#define GPIO_L_IN 0x15 /* GPIO input (low) */
#define GPIO_L_OUT 0x16 /* GPIO output (low) */
#define GPIO_L_DIR 0x17 /* GPIO direction (low) */
#define GPIO_H_IN 0x19 /* GPIO input (high) */
#define GPIO_H_OUT 0x1A /* GPIO output (high) */
#define GPIO_H_DIR 0x1B /* GPIO direction (high) */
// Definitions for other slow bus registers can be added as needed
/* Definitions for other slow bus registers can be added as needed */
/*****************************************************************************
/*
* Transmit Sequencer Command Descriptor definitions
*****************************************************************************/
// This descriptor must be placed in GRAM. The address of this descriptor
// (along with a couple of control bits) is pushed onto the PxhCmdQ or PxlCmdQ
// (Proxy high or low command queue). This data is read by the Proxy Sequencer,
// which pushes it onto the XmtCmdQ, which is (eventually) read by the Transmit
// Sequencer, causing a packet to be transmitted. Not all fields are valid for
// all commands - see the Sahara spec for details. Note that this structure is
// only valid when compiled on a little endian machine.
*
* This descriptor must be placed in GRAM. The address of this descriptor
* (along with a couple of control bits) is pushed onto the PxhCmdQ or PxlCmdQ
* (Proxy high or low command queue). This data is read by the Proxy Sequencer,
* which pushes it onto the XmtCmdQ, which is (eventually) read by the Transmit
* Sequencer, causing a packet to be transmitted. Not all fields are valid for
* all commands - see the Sahara spec for details. Note that this structure is
* only valid when compiled on a little endian machine.
*/
#pragma pack(push, 1)
struct xmt_desc {
ushort XmtLen; // word 0, bits [15:0] - transmit length
unsigned char XmtCtl; // word 0, bits [23:16] - transmit control byte
unsigned char Cmd; // word 0, bits [31:24] - transmit command plus misc.
u32 XmtBufId; // word 1, bits [31:0] - transmit buffer ID
unsigned char TcpStrt; // word 2, bits [7:0] - byte address of TCP header
unsigned char IpStrt; // word 2, bits [15:8] - byte address of IP header
ushort IpCkSum; // word 2, bits [31:16] - partial IP checksum
ushort TcpCkSum; // word 3, bits [15:0] - partial TCP checksum
ushort Rsvd1; // word 3, bits [31:16] - PAD
u32 Rsvd2; // word 4, bits [31:0] - PAD
u32 Rsvd3; // word 5, bits [31:0] - PAD
u32 Rsvd4; // word 6, bits [31:0] - PAD
u32 Rsvd5; // word 7, bits [31:0] - PAD
ushort XmtLen; /* word 0, bits [15:0] - transmit length */
unsigned char XmtCtl; /* word 0, bits [23:16] - transmit control byte */
unsigned char Cmd; /* word 0, bits [31:24] - transmit command plus misc. */
u32 XmtBufId; /* word 1, bits [31:0] - transmit buffer ID */
unsigned char TcpStrt; /* word 2, bits [7:0] - byte address of TCP header */
unsigned char IpStrt; /* word 2, bits [15:8] - byte address of IP header */
ushort IpCkSum; /* word 2, bits [31:16] - partial IP checksum */
ushort TcpCkSum; /* word 3, bits [15:0] - partial TCP checksum */
ushort Rsvd1; /* word 3, bits [31:16] - PAD */
u32 Rsvd2; /* word 4, bits [31:0] - PAD */
u32 Rsvd3; /* word 5, bits [31:0] - PAD */
u32 Rsvd4; /* word 6, bits [31:0] - PAD */
u32 Rsvd5; /* word 7, bits [31:0] - PAD */
};
#pragma pack(pop)
// struct xmt_desc Cmd byte definitions
// command codes
#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor
#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor
#define XMT_DESC_CMD_FORMAT 2 // format descriptor
#define XMT_DESC_CMD_PRIME 3 // prime descriptor
#define XMT_DESC_CMD_CODE_SHFT 6 // comand code shift (shift to bits [31:30] in word 0)
// shifted command codes
/* struct xmt_desc Cmd byte definitions */
/* command codes */
#define XMT_DESC_CMD_RAW_SEND 0 /* raw send descriptor */
#define XMT_DESC_CMD_CSUM_INSERT 1 /* checksum insert descriptor */
#define XMT_DESC_CMD_FORMAT 2 /* format descriptor */
#define XMT_DESC_CMD_PRIME 3 /* prime descriptor */
#define XMT_DESC_CMD_CODE_SHFT 6 /* command code shift (shift to bits [31:30] in word 0) */
/* shifted command codes */
#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
#define XMT_CSUM_INSERT (XMT_DESC_CMD_CSUM_INSERT << XMT_DESC_CMD_CODE_SHFT)
#define XMT_FORMAT (XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT)
#define XMT_PRIME (XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT)
// struct xmt_desc Control Byte (XmtCtl) definitions
// NOTE: These bits do not work on Sahara (Rev A)!
#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics)
#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier
#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame
#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes
#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calcution
#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
// struct xmt_desc XmtBufId definition
#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
// the buffer (DRAM) address by 256 (or << 8)
/*****************************************************************************
* Receiver Sequencer Definitions
*****************************************************************************/
// Receive Event Queue (queues 3 - 6) bit definitions
#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID
// Receive Buffer ID definition
#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
// the buffer (DRAM) address by 32 (or << 5)
// Format of the 18 byte Receive Buffer returned by the
// Receive Sequencer for received packets
/*
* struct xmt_desc Control Byte (XmtCtl) definitions
* NOTE: These bits do not work on Sahara (Rev A)!
*/
#define XMT_CTL_PAUSE_FRAME 0x80 /* current frame is a pause control frame (for statistics) */
#define XMT_CTL_CONTROL_FRAME 0x40 /* current frame is a control frame (for statistics) */
#define XMT_CTL_PER_PKT_QUAL 0x20 /* per packet qualifier */
#define XMT_CTL_PAD_MODE_NONE 0x00 /* do not pad frame */
#define XMT_CTL_PAD_MODE_64 0x08 /* pad frame to 64 bytes */
#define XMT_CTL_PAD_MODE_VLAN_68 0x10 /* pad frame to 64 bytes, and VLAN frames to 68 bytes */
#define XMT_CTL_PAD_MODE_68 0x18 /* pad frame to 68 bytes */
#define XMT_CTL_GEN_FCS 0x04 /* generate FCS (CRC) for this frame */
#define XMT_CTL_DELAY_FCS_0 0x00 /* do not delay FCS calculation */
#define XMT_CTL_DELAY_FCS_1 0x01 /* delay FCS calculation by 1 (4-byte) word */
#define XMT_CTL_DELAY_FCS_2 0x02 /* delay FCS calculation by 2 (4-byte) words */
#define XMT_CTL_DELAY_FCS_3 0x03 /* delay FCS calculation by 3 (4-byte) words */
/* struct xmt_desc XmtBufId definition */
#define XMT_BUF_ID_SHFT 8 /* The Xmt buffer ID is formed by dividing */
/* the buffer (DRAM) address by 256 (or << 8) */
/* Receiver Sequencer Definitions */
/* Receive Event Queue (queues 3 - 6) bit definitions */
#define RCV_EVTQ_RBFID_MASK 0x0000FFFF /* bit mask for the Receive Buffer ID */
/* Receive Buffer ID definition */
#define RCV_BUF_ID_SHFT 5 /* The Rcv buffer ID is formed by dividing */
/* the buffer (DRAM) address by 32 (or << 5) */
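/*
 * Sketch, not in the original header: forming buffer IDs as the comments
 * above describe (buffer address divided by the buffer granularity), and
 * extracting the receive buffer ID from a receive event queue entry.  The
 * shift direction follows the "divide by 256 / divide by 32" wording.
 */
static inline u32 example_xmt_buf_id(u32 dram_addr)
{
	return dram_addr >> XMT_BUF_ID_SHFT;	/* address / 256 */
}

static inline u32 example_rcv_buf_id_from_event(u32 evtq_entry)
{
	return evtq_entry & RCV_EVTQ_RBFID_MASK; /* bits [15:0] hold the ID */
}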
/*
* Format of the 18 byte Receive Buffer returned by the
* Receive Sequencer for received packets
*/
#pragma pack(push, 1)
struct rcv_buf_hdr {
u32 Status; // Status word from Rcv Seq Parser
ushort Length; // Rcv packet byte count
u32 Status; /* Status word from Rcv Seq Parser */
ushort Length; /* Rcv packet byte count */
union {
ushort TcpCsum; // TCP checksum
ushort TcpCsum; /* TCP checksum */
struct {
unsigned char TcpCsumL; // lower 8 bits of the TCP checksum
unsigned char LinkHash; // Link hash (multicast frames only)
unsigned char TcpCsumL; /* lower 8 bits of the TCP checksum */
unsigned char LinkHash; /* Link hash (multicast frames only) */
};
};
ushort SktHash; // Socket hash
unsigned char TcpHdrOffset; // TCP header offset into packet
unsigned char IpHdrOffset; // IP header offset into packet
u32 TpzHash; // Toeplitz hash
ushort Reserved; // Reserved
ushort SktHash; /* Socket hash */
unsigned char TcpHdrOffset; /* TCP header offset into packet */
unsigned char IpHdrOffset; /* IP header offset into packet */
u32 TpzHash; /* Toeplitz hash */
ushort Reserved; /* Reserved */
};
#pragma pack(pop)
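/*
 * Not in the original header: with the pack(1) pragma in effect, the fields
 * above total exactly 18 bytes (4 + 2 + 2 + 2 + 1 + 1 + 4 + 2), matching the
 * "18 byte Receive Buffer" description.  A build-time check such as the
 * following (C11 syntax, shown for illustration only) can catch packing
 * mistakes:
 */
_Static_assert(sizeof(struct rcv_buf_hdr) == 18,
	       "rcv_buf_hdr must match the 18-byte hardware layout");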
/*****************************************************************************
* Queue definitions
*****************************************************************************/
/* Queue definitions */
/* Ingress (read only) queue numbers */
#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
#define HST_EVT_Q 1 /* Host Event Queue */
#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
#define HST_EVT_Q 1 /* Host Event Queue */
#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
/* Local (read/write) queue numbers */
#define LOCAL_A_Q 8 /* Spare local Queue */
#define LOCAL_B_Q 9 /* Spare local Queue */
#define LOCAL_C_Q 10 /* Spare local Queue */
#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
#define LOCAL_A_Q 8 /* Spare local Queue */
#define LOCAL_B_Q 9 /* Spare local Queue */
#define LOCAL_C_Q 10 /* Spare local Queue */
#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
/* Egress (write only) queue numbers */
#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
#define RCV_BUF_Q 26 /* Receive Buffer Queue */
#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
#define RCV_BUF_Q 26 /* Receive Buffer Queue */
/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
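/*
 * Sketch, not in the original header, of the transmit flow described for
 * struct xmt_desc above: fill in a descriptor that lives in GRAM, then push
 * its address plus control bits onto the high-priority proxy command queue.
 * The sxg_push_queue() helper and the exact packing of the GRAM address
 * into the queue entry are assumptions for illustration only.
 */
extern void sxg_push_queue(u32 queue, u32 entry);	/* hypothetical helper */

static void example_raw_send(struct xmt_desc *desc, u32 gram_addr, ushort len)
{
	desc->XmtLen = len;			/* frame length */
	desc->Cmd = XMT_RAW_SEND;		/* raw send command code */
	/* Pad short frames and generate FCS (per the note, not on Rev A) */
	desc->XmtCtl = XMT_CTL_PAD_MODE_64 | XMT_CTL_GEN_FCS;

	/* Have the Proxy Sequencer copy 32 bytes of the descriptor onward */
	sxg_push_queue(PXH_CMD_Q, gram_addr | PXY_COPY_EN | PXY_SIZE_32);
}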
/*****************************************************************************
* SXG EEPROM/Flash Configuration Definitions
*****************************************************************************/
// Location of configuration data in EEPROM or Flash
#define EEPROM_CONFIG_START_ADDR 0x00 // start addr for config info in EEPROM
#define FLASH_CONFIG_START_ADDR 0x80 // start addr for config info in Flash
// Configuration data section defines
#define HW_CFG_SECTION_SIZE 512 // size of H/W section
#define HW_CFG_SECTION_SIZE_A 256 // size of H/W section (Sahara rev A)
#define SW_CFG_SECTION_START 512 // starting location (offset) of S/W section
#define SW_CFG_SECTION_START_A 256 // starting location (offset) of S/W section (Sahara rev A)
#define SW_CFG_SECTION_SIZE 128 // size of S/W section
#define HW_CFG_MAGIC_WORD 0xA5A5 // H/W configuration data magic word
// Goes in Addr field of first HW_CFG_DATA entry
#define HW_CFG_TERMINATOR 0xFFFF // H/W configuration data terminator
// Goes in Addr field of last HW_CFG_DATA entry
#define SW_CFG_MAGIC_WORD 0x5A5A // S/W configuration data magic word
#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
/* SXG EEPROM/Flash Configuration Definitions */
/* Location of configuration data in EEPROM or Flash */
#define EEPROM_CONFIG_START_ADDR 0x00 /* start addr for config info in EEPROM */
#define FLASH_CONFIG_START_ADDR 0x80 /* start addr for config info in Flash */
/* Configuration data section defines */
#define HW_CFG_SECTION_SIZE 512 /* size of H/W section */
#define HW_CFG_SECTION_SIZE_A 256 /* size of H/W section (Sahara rev A) */
#define SW_CFG_SECTION_START 512 /* starting location (offset) of S/W section */
#define SW_CFG_SECTION_START_A 256 /* starting location (offset) of S/W section (Sahara rev A) */
#define SW_CFG_SECTION_SIZE 128 /* size of S/W section */
#define HW_CFG_MAGIC_WORD 0xA5A5 /* H/W configuration data magic word */
/* Goes in Addr field of first struct hw_cfg_data entry */
#define HW_CFG_TERMINATOR 0xFFFF /* H/W configuration data terminator */
/* Goes in Addr field of last struct hw_cfg_data entry */
#define SW_CFG_MAGIC_WORD 0x5A5A /* S/W configuration data magic word */
#pragma pack(push, 1)
// Structure for an element of H/W configuration data.
// Read by the Sahara hardware
/*
* Structure for an element of H/W configuration data.
* Read by the Sahara hardware
*/
struct hw_cfg_data {
ushort Addr;
ushort Data;
};
// Number of struct hw_cfg_data structures to put in the configuration data
// data structure (struct sxg_config or struct sxg_config_a). The number is computed
// to fill the entire H/W config section of the structure.
/*
* Number of struct hw_cfg_data structures to put in the configuration data
* data structure (struct sxg_config or struct sxg_config_a). The number is computed
* to fill the entire H/W config section of the structure.
*/
#define NUM_HW_CFG_ENTRIES (HW_CFG_SECTION_SIZE / sizeof(struct hw_cfg_data))
#define NUM_HW_CFG_ENTRIES_A (HW_CFG_SECTION_SIZE_A / sizeof(struct hw_cfg_data))
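/*
 * Worked example, not in the original header: sizeof(struct hw_cfg_data) is
 * 4 bytes (two ushorts), so the counts above evaluate to 512 / 4 = 128
 * entries for Sahara rev B and 256 / 4 = 64 entries for rev A.  Stated as
 * C11 checks for illustration:
 */
_Static_assert(NUM_HW_CFG_ENTRIES == 128, "512-byte H/W section holds 128 entries");
_Static_assert(NUM_HW_CFG_ENTRIES_A == 64, "256-byte H/W section holds 64 entries");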
......@@ -734,73 +736,69 @@ struct atk_fru {
#define EMC_FRU_FORMAT 0x0005
#define NO_FRU_FORMAT 0xFFFF
#define ATK_OEM_ASSY_SIZE 10 // assy num is 9 chars plus \0
#define ATK_OEM_ASSY_SIZE 10 /* assy num is 9 chars plus \0 */
// OEM FRU structure for Alacritech
/* OEM FRU structure for Alacritech */
struct atk_oem {
unsigned char Assy[ATK_OEM_ASSY_SIZE];
};
#define OEM_EEPROM_FRUSIZE 74 // size of OEM fru info - size
// chosen to fill out the S/W section
#define OEM_EEPROM_FRUSIZE 74 /* size of OEM fru info - size */
/* chosen to fill out the S/W section */
union oem_fru { // OEM FRU information
union oem_fru { /* OEM FRU information */
unsigned char OemFru[OEM_EEPROM_FRUSIZE];
struct atk_oem AtkOem;
};
// Structure to hold the S/W configuration data.
/* Structure to hold the S/W configuration data. */
struct sw_cfg_data {
ushort MagicWord; // Magic word for section 2
ushort Version; // Format version
struct sxg_config_mac MacAddr[4]; // space for 4 MAC addresses
struct atk_fru AtkFru; // FRU information
ushort OemFruFormat; // OEM FRU format type
union oem_fru OemFru; // OEM FRU information
ushort Checksum; // Checksum of section 2
ushort MagicWord; /* Magic word for section 2 */
ushort Version; /* Format version */
struct sxg_config_mac MacAddr[4]; /* space for 4 MAC addresses */
struct atk_fru AtkFru; /* FRU information */
ushort OemFruFormat; /* OEM FRU format type */
union oem_fru OemFru; /* OEM FRU information */
ushort Checksum; /* Checksum of section 2 */
};
/* EEPROM/Flash Format */
struct sxg_config {
/*
* H/W Section - Read by Sahara hardware (512 bytes)
*/
/* H/W Section - Read by Sahara hardware (512 bytes) */
struct hw_cfg_data HwCfg[NUM_HW_CFG_ENTRIES];
/*
* S/W Section - Other configuration data (128 bytes)
*/
/* S/W Section - Other configuration data (128 bytes) */
struct sw_cfg_data SwCfg;
};
// EEPROM/Flash Format (Sahara rev A)
/* EEPROM/Flash Format (Sahara rev A) */
struct sxg_config_a {
/*
* H/W Section - Read by Sahara hardware (256 bytes)
*/
/* H/W Section - Read by Sahara hardware (256 bytes) */
struct hw_cfg_data HwCfg[NUM_HW_CFG_ENTRIES_A];
/*
* S/W Section - Other configuration data (128 bytes)
*/
/* S/W Section - Other configuration data (128 bytes) */
struct sw_cfg_data SwCfg;
};
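/*
 * Sketch, not in the original header: a minimal sanity check of a
 * configuration image read from EEPROM/Flash, using the magic-word and
 * terminator conventions described above.  Checksum verification and error
 * reporting are deliberately omitted.
 */
static bool example_config_valid(const struct sxg_config *cfg)
{
	/* First H/W entry carries the H/W magic word in its Addr field */
	if (cfg->HwCfg[0].Addr != HW_CFG_MAGIC_WORD)
		return false;

	/* Terminator goes in the Addr field of the last H/W entry */
	if (cfg->HwCfg[NUM_HW_CFG_ENTRIES - 1].Addr != HW_CFG_TERMINATOR)
		return false;

	/* S/W section has its own magic word */
	return cfg->SwCfg.MagicWord == SW_CFG_MAGIC_WORD;
}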
#ifdef WINDOWS_COMPILER
// The following macro is something of a kludge, but it is the only way
// that I could find to catch certain programming errors at compile time.
// If the asserted condition is true, then nothing happens. If false, then
// the compiler tries to typedef an array with -1 members, which generates
// an error. Unfortunately, the error message is meaningless, but at least
// it catches the problem. This macro would be unnecessary if the compiler
// allowed the sizeof and offsetof macros to be used in the #if directive.
/*
* The following macro is something of a kludge, but it is the only way
* that I could find to catch certain programming errors at compile time.
* If the asserted condition is true, then nothing happens. If false, then
* the compiler tries to typedef an array with -1 members, which generates
* an error. Unfortunately, the error message is meaningless, but at least
* it catches the problem. This macro would be unnecessary if the compiler
* allowed the sizeof and offsetof macros to be used in the #if directive.
*/
#define compile_time_assert(cond) \
typedef char comp_error[(cond) ? 1 : -1]
// A compiler error on either of the next two lines indicates that the SXG_CONFIG
// structure was built incorrectly. Unfortunately, the error message produced
// is meaningless. But this is apparently the only way to catch this problem
// at compile time.
/*
* A compiler error on either of the next two lines indicates that the struct sxg_config
* structure was built incorrectly. Unfortunately, the error message produced
* is meaningless. But this is apparently the only way to catch this problem
* at compile time.
*/
compile_time_assert (offsetof(struct sxg_config, SwCfg) == SW_CFG_SECTION_START);
compile_time_assert (sizeof(struct sxg_config) == HW_CFG_SECTION_SIZE + SW_CFG_SECTION_SIZE);
......@@ -813,11 +811,11 @@ compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A + SW_C
*/
struct adapt_userinfo {
bool LinkUp;
// u32 LinkState; // use LinkUp - any need for other states?
u32 LinkSpeed; // not currently needed
u32 LinkDuplex; // not currently needed
u32 Port; // not currently needed
u32 PhysPort; // not currently needed
/* u32 LinkState; * use LinkUp - any need for other states? */
u32 LinkSpeed; /* not currently needed */
u32 LinkDuplex; /* not currently needed */
u32 Port; /* not currently needed */
u32 PhysPort; /* not currently needed */
ushort PciLanes;
unsigned char MacAddr[6];
unsigned char CurrMacAddr[6];
......@@ -828,24 +826,22 @@ struct adapt_userinfo {
#pragma pack(pop)
/*****************************************************************************
* Miscellaneous Hardware definitions
*****************************************************************************/
/* Miscellaneous Hardware definitions */
// Type of ASIC in use
/* Type of ASIC in use */
enum ASIC_TYPE{
SAHARA_REV_A,
SAHARA_REV_B
};
// Sahara (ASIC level) defines
#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
// Arabia (board level) defines
#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
#define EEPROM_SIZE_XFMR 1024 // EEPROM size (bytes), including xfmr area
#define EEPROM_SIZE_NO_XFMR 640 // EEPROM size excluding xfmr area (512 + 128)
#define EEPROM_SIZE_REV_A 512 // EEPROM size for Sahara rev A
/* Sahara (ASIC level) defines */
#define SAHARA_GRAM_SIZE 0x020000 /* GRAM size - 128 KB */
#define SAHARA_DRAM_SIZE 0x200000 /* DRAM size - 2 MB */
#define SAHARA_QRAM_SIZE 0x004000 /* QRAM size - 16K entries (64 KB) */
#define SAHARA_WCS_SIZE 0x002000 /* WCS - 8K instructions (x 108 bits) */
/* Arabia (board level) defines */
#define FLASH_SIZE 0x080000 /* 512 KB (4 Mb) */
#define EEPROM_SIZE_XFMR 1024 /* EEPROM size (bytes), including xfmr area */
#define EEPROM_SIZE_NO_XFMR 640 /* EEPROM size excluding xfmr area (512 + 128) */
#define EEPROM_SIZE_REV_A 512 /* EEPROM size for Sahara rev A */
/*
/********************************************************************
* Copyright (C) 1997-2008 Alacritech, Inc. All rights reserved
*
* sxgphycode.h:
*
 * This file contains PHY microcode and register initialization data.
*/
********************************************************************/
/**********************************************************************
/*
* PHY Microcode
*
* The following contains both PHY microcode and PHY register
* initialization data. It is specific to both the PHY and the
* type of transceiver.
*
**********************************************************************/
/*
* Download for AEL2005C PHY with SR/LR transceiver (10GBASE-SR or 10GBASE-LR)
*/
/* Download for AEL2005C PHY with SR/LR transceiver (10GBASE-SR or 10GBASE-LR) */
static struct phy_ucode PhyUcode[] = {
/*
* NOTE: An address of 0 is a special case. When the download routine
......