Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
15f4cd09
Commit
15f4cd09
authored
May 06, 2003
by
Chas Williams
Committed by
David S. Miller
May 06, 2003
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
[ATM]: Add Forerunner HE support.
parent
52f568fe
Changes
5
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
4392 additions
and
0 deletions
+4392
-0
drivers/atm/Kconfig
drivers/atm/Kconfig
+14
-0
drivers/atm/Makefile
drivers/atm/Makefile
+4
-0
drivers/atm/he.c
drivers/atm/he.c
+3419
-0
drivers/atm/he.h
drivers/atm/he.h
+935
-0
include/linux/atm_he.h
include/linux/atm_he.h
+20
-0
No files found.
drivers/atm/Kconfig
View file @
15f4cd09
...
...
@@ -440,5 +440,19 @@ config ATM_FORE200E
default m if ATM_FORE200E_MAYBE!=y
default y if ATM_FORE200E_MAYBE=y
config ATM_HE
tristate "ForeRunner HE Series"
depends on PCI && ATM
help
This is a driver for the Marconi ForeRunner HE-series ATM adapter
cards. It simultaneously supports the 155 and 622 versions.
config ATM_HE_USE_SUNI
bool "Use S/UNI PHY driver"
depends on ATM_HE
help
Support for the S/UNI-Ultra and S/UNI-622 found in the ForeRunner
HE cards. This driver provides carrier detection and some statistics.
endmenu
drivers/atm/Makefile
View file @
15f4cd09
...
...
@@ -49,6 +49,10 @@ ifeq ($(CONFIG_ATM_FORE200E_SBA),y)
CONFIG_ATM_FORE200E_SBA_FW
:=
$(obj)
/sba200e_ecd.bin2
endif
endif
obj-$(CONFIG_ATM_HE)
+=
he.o
ifeq
($(CONFIG_ATM_HE_USE_SUNI),y)
obj-$(CONFIG_ATM_HE)
+=
suni.o
endif
# FORE Systems 200E-series firmware magic
$(obj)/fore200e_pca_fw.c
:
$(patsubst "%"
,
%
,
$(CONFIG_ATM_FORE200E_PCA_FW))
\
...
...
drivers/atm/he.c
0 → 100644
View file @
15f4cd09
/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
/*
he.c
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001 Naval Research Laboratory
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
he.c
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001 Naval Research Laboratory
Permission to use, copy, modify and distribute this software and its
documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the software,
derivative works or modified versions, and any portions thereof, and
that both notices appear in supporting documentation.
NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
RESULTING FROM THE USE OF THIS SOFTWARE.
This driver was written using the "Programmer's Reference Manual for
ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
AUTHORS:
chas williams <chas@cmf.nrl.navy.mil>
eric kinzie <ekinzie@cmf.nrl.navy.mil>
NOTES:
4096 supported 'connections'
group 0 is used for all traffic
interrupt queue 0 is used for all interrupts
aal0 support for receive only
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>
#ifndef ATM_OC12_PCR
#define ATM_OC12_PCR (622080000/1080*1040/8/53)
#endif
#ifdef BUS_INT_WAR
void
sn_add_polled_interrupt
(
int
irq
,
int
interval
);
void
sn_delete_polled_interrupt
(
int
irq
);
#endif
#define USE_TASKLET
#define USE_HE_FIND_VCC
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW
/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL
/* if memory is tight try this */
#undef USE_RBPL_POOL
/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* 2.2 kernel support */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43)
#define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
#define dev_kfree_skb_any(skb) dev_kfree_skb(skb)
#undef USE_TASKLET
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,18)
#define set_current_state(x) current->state = (x);
#endif
#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>
#define hprintk(fmt,args...) printk(DEV_LABEL "%d: " fmt, he_dev->number, args)
#define hprintk1(fmt) printk(DEV_LABEL "%d: " fmt, he_dev->number)
#undef DEBUG
#ifdef DEBUG
#define HPRINTK(fmt,args...) hprintk(fmt,args)
#define HPRINTK1(fmt) hprintk1(fmt)
#else
#define HPRINTK(fmt,args...)
#define HPRINTK1(fmt,args...)
#endif
/* DEBUG */
/* version definition */
static
char
*
version
=
"$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $"
;
/* defines */
#define ALIGN_ADDRESS(addr, alignment) \
((((unsigned long) (addr)) + (((unsigned long) (alignment)) - 1)) & ~(((unsigned long) (alignment)) - 1))
/* declarations */
static
int
he_open
(
struct
atm_vcc
*
vcc
,
short
vpi
,
int
vci
);
static
void
he_close
(
struct
atm_vcc
*
vcc
);
static
int
he_send
(
struct
atm_vcc
*
vcc
,
struct
sk_buff
*
skb
);
static
int
he_sg_send
(
struct
atm_vcc
*
vcc
,
unsigned
long
start
,
unsigned
long
size
);
static
int
he_ioctl
(
struct
atm_dev
*
dev
,
unsigned
int
cmd
,
void
*
arg
);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
static
irqreturn_t
he_irq_handler
(
int
irq
,
void
*
dev_id
,
struct
pt_regs
*
regs
);
#else
static
void
he_irq_handler
(
int
irq
,
void
*
dev_id
,
struct
pt_regs
*
regs
);
#endif
static
void
he_tasklet
(
unsigned
long
data
);
static
int
he_proc_read
(
struct
atm_dev
*
dev
,
loff_t
*
pos
,
char
*
page
);
static
int
he_start
(
struct
atm_dev
*
dev
);
static
void
he_stop
(
struct
he_dev
*
dev
);
static
void
he_phy_put
(
struct
atm_dev
*
,
unsigned
char
,
unsigned
long
);
static
unsigned
char
he_phy_get
(
struct
atm_dev
*
,
unsigned
long
);
static
u8
read_prom_byte
(
struct
he_dev
*
he_dev
,
int
addr
);
/* globals */
struct
he_dev
*
he_devs
=
NULL
;
static
short
disable64
=
-
1
;
static
short
nvpibits
=
-
1
;
static
short
nvcibits
=
-
1
;
static
short
rx_skb_reserve
=
16
;
static
short
irq_coalesce
=
1
;
static
short
sdh
=
1
;
static
struct
atmdev_ops
he_ops
=
{
open:
he_open
,
close:
he_close
,
ioctl:
he_ioctl
,
send:
he_send
,
sg_send:
he_sg_send
,
phy_put:
he_phy_put
,
phy_get:
he_phy_get
,
proc_read:
he_proc_read
,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,1)
owner:
THIS_MODULE
#endif
};
/* see the comments in he.h about global_lock */
#define HE_SPIN_LOCK(dev, flags) spin_lock_irqsave(&(dev)->global_lock, flags)
#define HE_SPIN_UNLOCK(dev, flags) spin_unlock_irqrestore(&(dev)->global_lock, flags)
#define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while(0)
#define he_readl(dev, reg) readl((dev)->membase + (reg))
/* section 2.12 connection memory access */
static
__inline__
void
he_writel_internal
(
struct
he_dev
*
he_dev
,
unsigned
val
,
unsigned
addr
,
unsigned
flags
)
{
he_writel
(
he_dev
,
val
,
CON_DAT
);
#ifdef CONFIG_IA64_SGI_SN2
(
void
)
he_readl
(
he_dev
,
CON_DAT
);
#endif
he_writel
(
he_dev
,
flags
|
CON_CTL_WRITE
|
CON_CTL_ADDR
(
addr
),
CON_CTL
);
while
(
he_readl
(
he_dev
,
CON_CTL
)
&
CON_CTL_BUSY
);
}
#define he_writel_rcm(dev, val, reg) \
he_writel_internal(dev, val, reg, CON_CTL_RCM)
#define he_writel_tcm(dev, val, reg) \
he_writel_internal(dev, val, reg, CON_CTL_TCM)
#define he_writel_mbox(dev, val, reg) \
he_writel_internal(dev, val, reg, CON_CTL_MBOX)
static
unsigned
he_readl_internal
(
struct
he_dev
*
he_dev
,
unsigned
addr
,
unsigned
flags
)
{
he_writel
(
he_dev
,
flags
|
CON_CTL_READ
|
CON_CTL_ADDR
(
addr
),
CON_CTL
);
while
(
he_readl
(
he_dev
,
CON_CTL
)
&
CON_CTL_BUSY
);
return
he_readl
(
he_dev
,
CON_DAT
);
}
#define he_readl_rcm(dev, reg) \
he_readl_internal(dev, reg, CON_CTL_RCM)
#define he_readl_tcm(dev, reg) \
he_readl_internal(dev, reg, CON_CTL_TCM)
#define he_readl_mbox(dev, reg) \
he_readl_internal(dev, reg, CON_CTL_MBOX)
/* figure 2.2 connection id */
#define he_mkcid(dev, vpi, vci) (((vpi<<(dev)->vcibits) | vci) & 0x1fff)
/* 2.5.1 per connection transmit state registers */
#define he_writel_tsr0(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 0)
#define he_readl_tsr0(dev, cid) \
he_readl_tcm(dev, CONFIG_TSRA | (cid<<3) | 0)
#define he_writel_tsr1(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 1)
#define he_writel_tsr2(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 2)
#define he_writel_tsr3(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 3)
#define he_writel_tsr4(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 4)
/* from page 2-20
*
* NOTE While the transmit connection is active, bits 23 through 0
* of this register must not be written by the host. Byte
* enables should be used during normal operation when writing
* the most significant byte.
*/
#define he_writel_tsr4_upper(dev, val, cid) \
he_writel_internal(dev, val, CONFIG_TSRA | (cid<<3) | 4, \
CON_CTL_TCM \
| CON_BYTE_DISABLE_2 \
| CON_BYTE_DISABLE_1 \
| CON_BYTE_DISABLE_0)
#define he_readl_tsr4(dev, cid) \
he_readl_tcm(dev, CONFIG_TSRA | (cid<<3) | 4)
#define he_writel_tsr5(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 5)
#define he_writel_tsr6(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 6)
#define he_writel_tsr7(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRA | (cid<<3) | 7)
#define he_writel_tsr8(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 0)
#define he_writel_tsr9(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 1)
#define he_writel_tsr10(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 2)
#define he_writel_tsr11(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRB | (cid<<2) | 3)
#define he_writel_tsr12(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRC | (cid<<1) | 0)
#define he_writel_tsr13(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRC | (cid<<1) | 1)
#define he_writel_tsr14(dev, val, cid) \
he_writel_tcm(dev, val, CONFIG_TSRD | cid)
#define he_writel_tsr14_upper(dev, val, cid) \
he_writel_internal(dev, val, CONFIG_TSRD | cid, \
CON_CTL_TCM \
| CON_BYTE_DISABLE_2 \
| CON_BYTE_DISABLE_1 \
| CON_BYTE_DISABLE_0)
/* 2.7.1 per connection receive state registers */
#define he_writel_rsr0(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 0)
#define he_readl_rsr0(dev, cid) \
he_readl_rcm(dev, 0x00000 | (cid<<3) | 0)
#define he_writel_rsr1(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 1)
#define he_writel_rsr2(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 2)
#define he_writel_rsr3(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 3)
#define he_writel_rsr4(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 4)
#define he_writel_rsr5(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 5)
#define he_writel_rsr6(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 6)
#define he_writel_rsr7(dev, val, cid) \
he_writel_rcm(dev, val, 0x00000 | (cid<<3) | 7)
static
__inline__
struct
atm_vcc
*
he_find_vcc
(
struct
he_dev
*
he_dev
,
unsigned
cid
)
{
struct
atm_vcc
*
vcc
;
short
vpi
;
int
vci
;
vpi
=
cid
>>
he_dev
->
vcibits
;
vci
=
cid
&
((
1
<<
he_dev
->
vcibits
)
-
1
);
for
(
vcc
=
he_dev
->
atm_dev
->
vccs
;
vcc
;
vcc
=
vcc
->
next
)
if
(
vcc
->
vci
==
vci
&&
vcc
->
vpi
==
vpi
&&
vcc
->
qos
.
rxtp
.
traffic_class
!=
ATM_NONE
)
return
vcc
;
return
NULL
;
}
static
int
__devinit
he_init_one
(
struct
pci_dev
*
pci_dev
,
const
struct
pci_device_id
*
pci_ent
)
{
struct
atm_dev
*
atm_dev
;
struct
he_dev
*
he_dev
;
printk
(
KERN_INFO
"he: %s
\n
"
,
version
);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,43)
if
(
pci_enable_device
(
pci_dev
))
return
-
EIO
;
#endif
if
(
pci_set_dma_mask
(
pci_dev
,
HE_DMA_MASK
)
!=
0
)
{
printk
(
KERN_WARNING
"he: no suitable dma available
\n
"
);
return
-
EIO
;
}
atm_dev
=
atm_dev_register
(
DEV_LABEL
,
&
he_ops
,
-
1
,
0
);
if
(
!
atm_dev
)
return
-
ENODEV
;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,3)
pci_set_drvdata
(
pci_dev
,
atm_dev
);
#else
pci_dev
->
driver_data
=
atm_dev
;
#endif
he_dev
=
(
struct
he_dev
*
)
kmalloc
(
sizeof
(
struct
he_dev
),
GFP_KERNEL
);
if
(
!
he_dev
)
return
-
ENOMEM
;
memset
(
he_dev
,
0
,
sizeof
(
struct
he_dev
));
he_dev
->
pci_dev
=
pci_dev
;
he_dev
->
atm_dev
=
atm_dev
;
he_dev
->
atm_dev
->
dev_data
=
he_dev
;
HE_DEV
(
atm_dev
)
=
he_dev
;
he_dev
->
number
=
atm_dev
->
number
;
/* was devs */
if
(
he_start
(
atm_dev
))
{
atm_dev_deregister
(
atm_dev
);
he_stop
(
he_dev
);
kfree
(
he_dev
);
return
-
ENODEV
;
}
he_dev
->
next
=
NULL
;
if
(
he_devs
)
he_dev
->
next
=
he_devs
;
he_devs
=
he_dev
;
return
0
;
}
static
void
__devexit
he_remove_one
(
struct
pci_dev
*
pci_dev
)
{
struct
atm_dev
*
atm_dev
;
struct
he_dev
*
he_dev
;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,3)
atm_dev
=
pci_get_drvdata
(
pci_dev
);
#else
atm_dev
=
pci_dev
->
driver_data
;
#endif
he_dev
=
HE_DEV
(
atm_dev
);
/* need to remove from he_devs */
he_stop
(
he_dev
);
atm_dev_deregister
(
atm_dev
);
kfree
(
he_dev
);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,3)
pci_set_drvdata
(
pci_dev
,
NULL
);
#else
pci_dev
->
driver_data
=
NULL
;
#endif
}
static
unsigned
rate_to_atmf
(
unsigned
rate
)
/* cps to atm forum format */
{
#define NONZERO (1<<14)
unsigned
exp
=
0
;
if
(
rate
==
0
)
return
(
0
);
rate
<<=
9
;
while
(
rate
>
0x3ff
)
{
++
exp
;
rate
>>=
1
;
}
return
(
NONZERO
|
(
exp
<<
9
)
|
(
rate
&
0x1ff
));
}
static
void
__init
he_init_rx_lbfp0
(
struct
he_dev
*
he_dev
)
{
unsigned
i
,
lbm_offset
,
lbufd_index
,
lbuf_addr
,
lbuf_count
;
unsigned
lbufs_per_row
=
he_dev
->
cells_per_row
/
he_dev
->
cells_per_lbuf
;
unsigned
lbuf_bufsize
=
he_dev
->
cells_per_lbuf
*
ATM_CELL_PAYLOAD
;
unsigned
row_offset
=
he_dev
->
r0_startrow
*
he_dev
->
bytes_per_row
;
lbufd_index
=
0
;
lbm_offset
=
he_readl
(
he_dev
,
RCMLBM_BA
);
he_writel
(
he_dev
,
lbufd_index
,
RLBF0_H
);
for
(
i
=
0
,
lbuf_count
=
0
;
i
<
he_dev
->
r0_numbuffs
;
++
i
)
{
lbufd_index
+=
2
;
lbuf_addr
=
(
row_offset
+
(
lbuf_count
*
lbuf_bufsize
))
/
32
;
he_writel_rcm
(
he_dev
,
lbuf_addr
,
lbm_offset
);
he_writel_rcm
(
he_dev
,
lbufd_index
,
lbm_offset
+
1
);
if
(
++
lbuf_count
==
lbufs_per_row
)
{
lbuf_count
=
0
;
row_offset
+=
he_dev
->
bytes_per_row
;
}
lbm_offset
+=
4
;
}
he_writel
(
he_dev
,
lbufd_index
-
2
,
RLBF0_T
);
he_writel
(
he_dev
,
he_dev
->
r0_numbuffs
,
RLBF0_C
);
}
static
void
__init
he_init_rx_lbfp1
(
struct
he_dev
*
he_dev
)
{
unsigned
i
,
lbm_offset
,
lbufd_index
,
lbuf_addr
,
lbuf_count
;
unsigned
lbufs_per_row
=
he_dev
->
cells_per_row
/
he_dev
->
cells_per_lbuf
;
unsigned
lbuf_bufsize
=
he_dev
->
cells_per_lbuf
*
ATM_CELL_PAYLOAD
;
unsigned
row_offset
=
he_dev
->
r1_startrow
*
he_dev
->
bytes_per_row
;
lbufd_index
=
1
;
lbm_offset
=
he_readl
(
he_dev
,
RCMLBM_BA
)
+
(
2
*
lbufd_index
);
he_writel
(
he_dev
,
lbufd_index
,
RLBF1_H
);
for
(
i
=
0
,
lbuf_count
=
0
;
i
<
he_dev
->
r1_numbuffs
;
++
i
)
{
lbufd_index
+=
2
;
lbuf_addr
=
(
row_offset
+
(
lbuf_count
*
lbuf_bufsize
))
/
32
;
he_writel_rcm
(
he_dev
,
lbuf_addr
,
lbm_offset
);
he_writel_rcm
(
he_dev
,
lbufd_index
,
lbm_offset
+
1
);
if
(
++
lbuf_count
==
lbufs_per_row
)
{
lbuf_count
=
0
;
row_offset
+=
he_dev
->
bytes_per_row
;
}
lbm_offset
+=
4
;
}
he_writel
(
he_dev
,
lbufd_index
-
2
,
RLBF1_T
);
he_writel
(
he_dev
,
he_dev
->
r1_numbuffs
,
RLBF1_C
);
}
static
void
__init
he_init_tx_lbfp
(
struct
he_dev
*
he_dev
)
{
unsigned
i
,
lbm_offset
,
lbufd_index
,
lbuf_addr
,
lbuf_count
;
unsigned
lbufs_per_row
=
he_dev
->
cells_per_row
/
he_dev
->
cells_per_lbuf
;
unsigned
lbuf_bufsize
=
he_dev
->
cells_per_lbuf
*
ATM_CELL_PAYLOAD
;
unsigned
row_offset
=
he_dev
->
tx_startrow
*
he_dev
->
bytes_per_row
;
lbufd_index
=
he_dev
->
r0_numbuffs
+
he_dev
->
r1_numbuffs
;
lbm_offset
=
he_readl
(
he_dev
,
RCMLBM_BA
)
+
(
2
*
lbufd_index
);
he_writel
(
he_dev
,
lbufd_index
,
TLBF_H
);
for
(
i
=
0
,
lbuf_count
=
0
;
i
<
he_dev
->
tx_numbuffs
;
++
i
)
{
lbufd_index
+=
1
;
lbuf_addr
=
(
row_offset
+
(
lbuf_count
*
lbuf_bufsize
))
/
32
;
he_writel_rcm
(
he_dev
,
lbuf_addr
,
lbm_offset
);
he_writel_rcm
(
he_dev
,
lbufd_index
,
lbm_offset
+
1
);
if
(
++
lbuf_count
==
lbufs_per_row
)
{
lbuf_count
=
0
;
row_offset
+=
he_dev
->
bytes_per_row
;
}
lbm_offset
+=
2
;
}
he_writel
(
he_dev
,
lbufd_index
-
1
,
TLBF_T
);
}
static
int
__init
he_init_tpdrq
(
struct
he_dev
*
he_dev
)
{
he_dev
->
tpdrq_base
=
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_TPDRQ_SIZE
*
sizeof
(
struct
he_tpdrq
),
&
he_dev
->
tpdrq_phys
);
if
(
he_dev
->
tpdrq_base
==
NULL
)
{
hprintk1
(
"failed to alloc tpdrq
\n
"
);
return
-
ENOMEM
;
}
memset
(
he_dev
->
tpdrq_base
,
0
,
CONFIG_TPDRQ_SIZE
*
sizeof
(
struct
he_tpdrq
));
he_dev
->
tpdrq_tail
=
he_dev
->
tpdrq_base
;
he_dev
->
tpdrq_head
=
he_dev
->
tpdrq_base
;
he_writel
(
he_dev
,
he_dev
->
tpdrq_phys
,
TPDRQ_B_H
);
he_writel
(
he_dev
,
0
,
TPDRQ_T
);
he_writel
(
he_dev
,
CONFIG_TPDRQ_SIZE
-
1
,
TPDRQ_S
);
return
0
;
}
static
void
__init
he_init_cs_block
(
struct
he_dev
*
he_dev
)
{
unsigned
clock
,
rate
,
delta
;
int
reg
;
/* 5.1.7 cs block initialization */
for
(
reg
=
0
;
reg
<
0x20
;
++
reg
)
he_writel_mbox
(
he_dev
,
0x0
,
CS_STTIM0
+
reg
);
/* rate grid timer reload values */
clock
=
he_is622
(
he_dev
)
?
66667000
:
50000000
;
rate
=
he_dev
->
atm_dev
->
link_rate
;
delta
=
rate
/
16
/
2
;
for
(
reg
=
0
;
reg
<
0x10
;
++
reg
)
{
/* 2.4 internal transmit function
*
* we initialize the first row in the rate grid.
* values are period (in clock cycles) of timer
*/
unsigned
period
=
clock
/
rate
;
he_writel_mbox
(
he_dev
,
period
,
CS_TGRLD0
+
reg
);
rate
-=
delta
;
}
if
(
he_is622
(
he_dev
))
{
/* table 5.2 (4 cells per lbuf) */
he_writel_mbox
(
he_dev
,
0x000800fa
,
CS_ERTHR0
);
he_writel_mbox
(
he_dev
,
0x000c33cb
,
CS_ERTHR1
);
he_writel_mbox
(
he_dev
,
0x0010101b
,
CS_ERTHR2
);
he_writel_mbox
(
he_dev
,
0x00181dac
,
CS_ERTHR3
);
he_writel_mbox
(
he_dev
,
0x00280600
,
CS_ERTHR4
);
/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
he_writel_mbox
(
he_dev
,
0x023de8b3
,
CS_ERCTL0
);
he_writel_mbox
(
he_dev
,
0x1801
,
CS_ERCTL1
);
he_writel_mbox
(
he_dev
,
0x68b3
,
CS_ERCTL2
);
he_writel_mbox
(
he_dev
,
0x1280
,
CS_ERSTAT0
);
he_writel_mbox
(
he_dev
,
0x68b3
,
CS_ERSTAT1
);
he_writel_mbox
(
he_dev
,
0x14585
,
CS_RTFWR
);
he_writel_mbox
(
he_dev
,
0x4680
,
CS_RTATR
);
/* table 5.8 */
he_writel_mbox
(
he_dev
,
0x00159ece
,
CS_TFBSET
);
he_writel_mbox
(
he_dev
,
0x68b3
,
CS_WCRMAX
);
he_writel_mbox
(
he_dev
,
0x5eb3
,
CS_WCRMIN
);
he_writel_mbox
(
he_dev
,
0xe8b3
,
CS_WCRINC
);
he_writel_mbox
(
he_dev
,
0xdeb3
,
CS_WCRDEC
);
he_writel_mbox
(
he_dev
,
0x68b3
,
CS_WCRCEIL
);
/* table 5.9 */
he_writel_mbox
(
he_dev
,
0x5
,
CS_OTPPER
);
he_writel_mbox
(
he_dev
,
0x14
,
CS_OTWPER
);
}
else
{
/* table 5.1 (4 cells per lbuf) */
he_writel_mbox
(
he_dev
,
0x000400ea
,
CS_ERTHR0
);
he_writel_mbox
(
he_dev
,
0x00063388
,
CS_ERTHR1
);
he_writel_mbox
(
he_dev
,
0x00081018
,
CS_ERTHR2
);
he_writel_mbox
(
he_dev
,
0x000c1dac
,
CS_ERTHR3
);
he_writel_mbox
(
he_dev
,
0x0014051a
,
CS_ERTHR4
);
/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
he_writel_mbox
(
he_dev
,
0x0235e4b1
,
CS_ERCTL0
);
he_writel_mbox
(
he_dev
,
0x4701
,
CS_ERCTL1
);
he_writel_mbox
(
he_dev
,
0x64b1
,
CS_ERCTL2
);
he_writel_mbox
(
he_dev
,
0x1280
,
CS_ERSTAT0
);
he_writel_mbox
(
he_dev
,
0x64b1
,
CS_ERSTAT1
);
he_writel_mbox
(
he_dev
,
0xf424
,
CS_RTFWR
);
he_writel_mbox
(
he_dev
,
0x4680
,
CS_RTATR
);
/* table 5.8 */
he_writel_mbox
(
he_dev
,
0x000563b7
,
CS_TFBSET
);
he_writel_mbox
(
he_dev
,
0x64b1
,
CS_WCRMAX
);
he_writel_mbox
(
he_dev
,
0x5ab1
,
CS_WCRMIN
);
he_writel_mbox
(
he_dev
,
0xe4b1
,
CS_WCRINC
);
he_writel_mbox
(
he_dev
,
0xdab1
,
CS_WCRDEC
);
he_writel_mbox
(
he_dev
,
0x64b1
,
CS_WCRCEIL
);
/* table 5.9 */
he_writel_mbox
(
he_dev
,
0x6
,
CS_OTPPER
);
he_writel_mbox
(
he_dev
,
0x1e
,
CS_OTWPER
);
}
he_writel_mbox
(
he_dev
,
0x8
,
CS_OTTLIM
);
for
(
reg
=
0
;
reg
<
0x8
;
++
reg
)
he_writel_mbox
(
he_dev
,
0x0
,
CS_HGRRT0
+
reg
);
}
static
void
__init
he_init_cs_block_rcm
(
struct
he_dev
*
he_dev
)
{
unsigned
rategrid
[
16
][
16
];
unsigned
rate
,
delta
;
int
i
,
j
,
reg
;
unsigned
rate_atmf
,
exp
,
man
;
unsigned
long
long
rate_cps
;
int
mult
,
buf
,
buf_limit
=
4
;
/* initialize rate grid group table */
for
(
reg
=
0x0
;
reg
<
0xff
;
++
reg
)
he_writel_rcm
(
he_dev
,
0x0
,
CONFIG_RCMABR
+
reg
);
/* initialize rate controller groups */
for
(
reg
=
0x100
;
reg
<
0x1ff
;
++
reg
)
he_writel_rcm
(
he_dev
,
0x0
,
CONFIG_RCMABR
+
reg
);
/* initialize tNrm lookup table */
/* the manual makes reference to a routine in a sample driver
for proper configuration; fortunately, we only need this
in order to support abr connection */
/* initialize rate to group table */
rate
=
he_dev
->
atm_dev
->
link_rate
;
delta
=
rate
/
32
;
/*
* 2.4 transmit internal functions
*
* we construct a copy of the rate grid used by the scheduler
* in order to construct the rate to group table below
*/
for
(
j
=
0
;
j
<
16
;
j
++
)
{
rategrid
[
0
][
j
]
=
rate
;
rate
-=
delta
;
}
for
(
i
=
1
;
i
<
16
;
i
++
)
for
(
j
=
0
;
j
<
16
;
j
++
)
if
(
i
>
14
)
rategrid
[
i
][
j
]
=
rategrid
[
i
-
1
][
j
]
/
4
;
else
rategrid
[
i
][
j
]
=
rategrid
[
i
-
1
][
j
]
/
2
;
/*
* 2.4 transmit internal function
*
* this table maps the upper 5 bits of exponent and mantissa
* of the atm forum representation of the rate into an index
* on rate grid
*/
rate_atmf
=
0
;
while
(
rate_atmf
<
0x400
)
{
man
=
(
rate_atmf
&
0x1f
)
<<
4
;
exp
=
rate_atmf
>>
5
;
/*
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
rate_cps
=
(
unsigned
long
long
)
(
1
<<
exp
)
*
(
man
+
512
)
>>
9
;
if
(
rate_cps
<
10
)
rate_cps
=
10
;
/* 2.2.1 minimum payload rate is 10 cps */
for
(
i
=
255
;
i
>
0
;
i
--
)
if
(
rategrid
[
i
/
16
][
i
%
16
]
>=
rate_cps
)
break
;
/* pick nearest rate instead? */
/*
* each table entry is 16 bits: (rate grid index (8 bits)
* and a buffer limit (8 bits)
* there are two table entries in each 32-bit register
*/
#ifdef notdef
buf
=
rate_cps
*
he_dev
->
tx_numbuffs
/
(
he_dev
->
atm_dev
->
link_rate
*
2
);
#else
/* this is pretty, but avoids _divdu3 and is mostly correct */
buf
=
0
;
mult
=
he_dev
->
atm_dev
->
link_rate
/
ATM_OC3_PCR
;
if
(
rate_cps
>
(
68
*
mult
))
buf
=
1
;
if
(
rate_cps
>
(
136
*
mult
))
buf
=
2
;
if
(
rate_cps
>
(
204
*
mult
))
buf
=
3
;
if
(
rate_cps
>
(
272
*
mult
))
buf
=
4
;
#endif
if
(
buf
>
buf_limit
)
buf
=
buf_limit
;
reg
=
(
reg
<<
16
)
|
((
i
<<
8
)
|
buf
);
#define RTGTBL_OFFSET 0x400
if
(
rate_atmf
&
0x1
)
he_writel_rcm
(
he_dev
,
reg
,
CONFIG_RCMABR
+
RTGTBL_OFFSET
+
(
rate_atmf
>>
1
));
++
rate_atmf
;
}
}
static
int
__init
he_init_group
(
struct
he_dev
*
he_dev
,
int
group
)
{
int
i
;
#ifdef USE_RBPS
/* small buffer pool */
#ifdef USE_RBPS_POOL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
he_dev
->
rbps_pool
=
pci_pool_create
(
"rbps"
,
he_dev
->
pci_dev
,
CONFIG_RBPS_BUFSIZE
,
8
,
0
,
SLAB_KERNEL
);
#else
he_dev
->
rbps_pool
=
pci_pool_create
(
"rbps"
,
he_dev
->
pci_dev
,
CONFIG_RBPS_BUFSIZE
,
8
,
0
);
#endif
if
(
he_dev
->
rbps_pool
==
NULL
)
{
hprintk1
(
"unable to create rbps pages
\n
"
);
return
-
ENOMEM
;
}
#else
/* !USE_RBPS_POOL */
he_dev
->
rbps_pages
=
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_RBPS_SIZE
*
CONFIG_RBPS_BUFSIZE
,
&
he_dev
->
rbps_pages_phys
);
if
(
he_dev
->
rbps_pages
==
NULL
)
{
hprintk1
(
"unable to create rbps page pool
\n
"
);
return
-
ENOMEM
;
}
#endif
/* USE_RBPS_POOL */
he_dev
->
rbps_base
=
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_RBPS_SIZE
*
sizeof
(
struct
he_rbp
),
&
he_dev
->
rbps_phys
);
if
(
he_dev
->
rbps_base
==
NULL
)
{
hprintk1
(
"failed to alloc rbps
\n
"
);
return
-
ENOMEM
;
}
memset
(
he_dev
->
rbps_base
,
0
,
CONFIG_RBPS_SIZE
*
sizeof
(
struct
he_rbp
));
he_dev
->
rbps_virt
=
kmalloc
(
CONFIG_RBPS_SIZE
*
sizeof
(
struct
he_virt
),
GFP_KERNEL
);
for
(
i
=
0
;
i
<
CONFIG_RBPS_SIZE
;
++
i
)
{
dma_addr_t
dma_handle
;
void
*
cpuaddr
;
#ifdef USE_RBPS_POOL
cpuaddr
=
pci_pool_alloc
(
he_dev
->
rbps_pool
,
SLAB_KERNEL
|
SLAB_DMA
,
&
dma_handle
);
if
(
cpuaddr
==
NULL
)
return
-
ENOMEM
;
#else
cpuaddr
=
he_dev
->
rbps_pages
+
(
i
*
CONFIG_RBPS_BUFSIZE
);
dma_handle
=
he_dev
->
rbps_pages_phys
+
(
i
*
CONFIG_RBPS_BUFSIZE
);
#endif
he_dev
->
rbps_virt
[
i
].
virt
=
cpuaddr
;
he_dev
->
rbps_base
[
i
].
status
=
RBP_LOANED
|
RBP_SMALLBUF
|
(
i
<<
RBP_INDEX_OFF
);
he_dev
->
rbps_base
[
i
].
phys
=
dma_handle
;
}
he_dev
->
rbps_tail
=
&
he_dev
->
rbps_base
[
CONFIG_RBPS_SIZE
-
1
];
he_writel
(
he_dev
,
he_dev
->
rbps_phys
,
G0_RBPS_S
+
(
group
*
32
));
he_writel
(
he_dev
,
RBPS_MASK
(
he_dev
->
rbps_tail
),
G0_RBPS_T
+
(
group
*
32
));
he_writel
(
he_dev
,
CONFIG_RBPS_BUFSIZE
/
4
,
G0_RBPS_BS
+
(
group
*
32
));
he_writel
(
he_dev
,
RBP_THRESH
(
CONFIG_RBPS_THRESH
)
|
RBP_QSIZE
(
CONFIG_RBPS_SIZE
-
1
)
|
RBP_INT_ENB
,
G0_RBPS_QI
+
(
group
*
32
));
#else
/* !USE_RBPS */
he_writel
(
he_dev
,
0x0
,
G0_RBPS_S
+
(
group
*
32
));
he_writel
(
he_dev
,
0x0
,
G0_RBPS_T
+
(
group
*
32
));
he_writel
(
he_dev
,
0x0
,
G0_RBPS_QI
+
(
group
*
32
));
he_writel
(
he_dev
,
RBP_THRESH
(
0x1
)
|
RBP_QSIZE
(
0x0
),
G0_RBPS_BS
+
(
group
*
32
));
#endif
/* USE_RBPS */
/* large buffer pool */
#ifdef USE_RBPL_POOL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
he_dev
->
rbpl_pool
=
pci_pool_create
(
"rbpl"
,
he_dev
->
pci_dev
,
CONFIG_RBPL_BUFSIZE
,
8
,
0
,
SLAB_KERNEL
);
#else
he_dev
->
rbpl_pool
=
pci_pool_create
(
"rbpl"
,
he_dev
->
pci_dev
,
CONFIG_RBPL_BUFSIZE
,
8
,
0
);
#endif
if
(
he_dev
->
rbpl_pool
==
NULL
)
{
hprintk1
(
"unable to create rbpl pool
\n
"
);
return
-
ENOMEM
;
}
#else
/* !USE_RBPL_POOL */
he_dev
->
rbpl_pages
=
(
void
*
)
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_RBPL_SIZE
*
CONFIG_RBPL_BUFSIZE
,
&
he_dev
->
rbpl_pages_phys
);
if
(
he_dev
->
rbpl_pages
==
NULL
)
{
hprintk1
(
"unable to create rbpl pages
\n
"
);
return
-
ENOMEM
;
}
#endif
/* USE_RBPL_POOL */
he_dev
->
rbpl_base
=
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_RBPL_SIZE
*
sizeof
(
struct
he_rbp
),
&
he_dev
->
rbpl_phys
);
if
(
he_dev
->
rbpl_base
==
NULL
)
{
hprintk1
(
"failed to alloc rbpl
\n
"
);
return
-
ENOMEM
;
}
memset
(
he_dev
->
rbpl_base
,
0
,
CONFIG_RBPL_SIZE
*
sizeof
(
struct
he_rbp
));
he_dev
->
rbpl_virt
=
kmalloc
(
CONFIG_RBPL_SIZE
*
sizeof
(
struct
he_virt
),
GFP_KERNEL
);
for
(
i
=
0
;
i
<
CONFIG_RBPL_SIZE
;
++
i
)
{
dma_addr_t
dma_handle
;
void
*
cpuaddr
;
#ifdef USE_RBPL_POOL
cpuaddr
=
pci_pool_alloc
(
he_dev
->
rbpl_pool
,
SLAB_KERNEL
|
SLAB_DMA
,
&
dma_handle
);
if
(
cpuaddr
==
NULL
)
return
-
ENOMEM
;
#else
cpuaddr
=
he_dev
->
rbpl_pages
+
(
i
*
CONFIG_RBPL_BUFSIZE
);
dma_handle
=
he_dev
->
rbpl_pages_phys
+
(
i
*
CONFIG_RBPL_BUFSIZE
);
#endif
he_dev
->
rbpl_virt
[
i
].
virt
=
cpuaddr
;
he_dev
->
rbpl_base
[
i
].
status
=
RBP_LOANED
|
(
i
<<
RBP_INDEX_OFF
);
he_dev
->
rbpl_base
[
i
].
phys
=
dma_handle
;
}
he_dev
->
rbpl_tail
=
&
he_dev
->
rbpl_base
[
CONFIG_RBPL_SIZE
-
1
];
he_writel
(
he_dev
,
he_dev
->
rbpl_phys
,
G0_RBPL_S
+
(
group
*
32
));
he_writel
(
he_dev
,
RBPL_MASK
(
he_dev
->
rbpl_tail
),
G0_RBPL_T
+
(
group
*
32
));
he_writel
(
he_dev
,
CONFIG_RBPL_BUFSIZE
/
4
,
G0_RBPL_BS
+
(
group
*
32
));
he_writel
(
he_dev
,
RBP_THRESH
(
CONFIG_RBPL_THRESH
)
|
RBP_QSIZE
(
CONFIG_RBPL_SIZE
-
1
)
|
RBP_INT_ENB
,
G0_RBPL_QI
+
(
group
*
32
));
/* rx buffer ready queue */
he_dev
->
rbrq_base
=
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_RBRQ_SIZE
*
sizeof
(
struct
he_rbrq
),
&
he_dev
->
rbrq_phys
);
if
(
he_dev
->
rbrq_base
==
NULL
)
{
hprintk1
(
"failed to allocate rbrq
\n
"
);
return
-
ENOMEM
;
}
memset
(
he_dev
->
rbrq_base
,
0
,
CONFIG_RBRQ_SIZE
*
sizeof
(
struct
he_rbrq
));
he_dev
->
rbrq_head
=
he_dev
->
rbrq_base
;
he_writel
(
he_dev
,
he_dev
->
rbrq_phys
,
G0_RBRQ_ST
+
(
group
*
16
));
he_writel
(
he_dev
,
0
,
G0_RBRQ_H
+
(
group
*
16
));
he_writel
(
he_dev
,
RBRQ_THRESH
(
CONFIG_RBRQ_THRESH
)
|
RBRQ_SIZE
(
CONFIG_RBRQ_SIZE
-
1
),
G0_RBRQ_Q
+
(
group
*
16
));
if
(
irq_coalesce
)
{
hprintk1
(
"coalescing interrupts
\n
"
);
he_writel
(
he_dev
,
RBRQ_TIME
(
768
)
|
RBRQ_COUNT
(
7
),
G0_RBRQ_I
+
(
group
*
16
));
}
else
he_writel
(
he_dev
,
RBRQ_TIME
(
0
)
|
RBRQ_COUNT
(
1
),
G0_RBRQ_I
+
(
group
*
16
));
/* tx buffer ready queue */
he_dev
->
tbrq_base
=
pci_alloc_consistent
(
he_dev
->
pci_dev
,
CONFIG_TBRQ_SIZE
*
sizeof
(
struct
he_tbrq
),
&
he_dev
->
tbrq_phys
);
if
(
he_dev
->
tbrq_base
==
NULL
)
{
hprintk1
(
"failed to allocate tbrq
\n
"
);
return
-
ENOMEM
;
}
memset
(
he_dev
->
tbrq_base
,
0
,
CONFIG_TBRQ_SIZE
*
sizeof
(
struct
he_tbrq
));
he_dev
->
tbrq_head
=
he_dev
->
tbrq_base
;
he_writel
(
he_dev
,
he_dev
->
tbrq_phys
,
G0_TBRQ_B_T
+
(
group
*
16
));
he_writel
(
he_dev
,
0
,
G0_TBRQ_H
+
(
group
*
16
));
he_writel
(
he_dev
,
CONFIG_TBRQ_SIZE
-
1
,
G0_TBRQ_S
+
(
group
*
16
));
he_writel
(
he_dev
,
CONFIG_TBRQ_THRESH
,
G0_TBRQ_THRESH
+
(
group
*
16
));
return
0
;
}
/*
 * he_init_irq - allocate and program the host interrupt queues.
 *
 * Allocates one DMA-coherent ring of CONFIG_IRQ_SIZE he_irq entries (plus
 * one extra slot used as the hardware tail-offset word), marks every entry
 * ITYPE_INVALID, points the adapter's IRQ group 0 at the ring, disables
 * groups 1-3, and registers the shared interrupt handler.
 *
 * Returns 0 on success, -ENOMEM if the ring cannot be allocated, or
 * -EINVAL if the interrupt line cannot be claimed.  On failure the ring
 * is left allocated; he_stop() frees it.
 */
static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5 tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk1("failed to allocate irq\n");
		return -ENOMEM;
	}
	/* the extra entry past the ring holds the hardware-updated tail offset */
	he_dev->irq_tailoffset = (unsigned *) &he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* ITYPE_INVALID marks an entry the adapter has not yet written */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	/* program interrupt queue 0 (the only one used) */
	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev, IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH), IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt queues 1-3 are unused; zero them out */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

#ifdef BUS_INT_WAR
	/* NOTE(review): SGI SN2 workaround — polls the interrupt instead of
	 * relying on line delivery; presumably needed for a chipset erratum */
	HPRINTK("sn_add_polled_interrupt(irq %d, 1)\n", he_dev->irq);
	sn_add_polled_interrupt(he_dev->irq, 1);
#endif

	return 0;
}
/*
 * he_start - bring up one ForeRunner HE adapter.
 *
 * Follows the hardware reference manual section-by-section (section
 * numbers are cited inline): PCI bus controller setup, card reset,
 * bus-width/endianness configuration, EEPROM read, interrupt queue
 * setup, cell buffer / connection memory layout, receive and transmit
 * queue initialization, framer (S/UNI) start, and finally enabling
 * the transmit and receive engines.
 *
 * Returns 0 on success or a negative errno.  Resources acquired before
 * a failure are released by he_stop() (the caller's cleanup path).
 */
static int __init
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,3)
	he_dev->membase = pci_dev->resource[0].start;
#else
	he_dev->membase = pci_dev->base_address[0] & PCI_BASE_ADDRESS_MEM_MASK;
#endif
	HPRINTK("membase = 0x%lx irq = %d.\n", he_dev->membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk1("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk1("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk1("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}
	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk1("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk1("can't read cache line size?\n");
		return -EINVAL;
	}
	/* the adapter needs a cache line of at least 16 for MWI to be useful */
	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk1("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	/* map the register window; membase switches from bus to virtual address */
	if (!(he_dev->membase = (unsigned long) ioremap(he_dev->membase, HE_REGMAP_SIZE))) {
		hprintk1("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk1("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk1("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk1("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0],
				dev->esi[1],
				dev->esi[2],
				dev->esi[3],
				dev->esi[4],
				dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;	/* 4 cells */
	else
		lb_swap |= XFER_SIZE;	/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 * local (cell) buffer memory map
	 *
	 * The HE155 has 1024-byte rows: utility area in rows 0-5, rx0 in
	 * rows 6-517, tx in rows 518-1535, rx1 in rows 1536-2047.  The
	 * HE622 has 2048-byte rows: rx0 in rows 0-255, tx in rows 256-767,
	 * rx1 in rows 768-1023, with the utility area in the upper bytes
	 * of each row.  The start-row/num-row assignments below encode
	 * exactly this layout.
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	/* module parameters may override the vpi/vci split, but the total
	 * connection-id width must stay HE_MAXCIDBITS */
	if (nvpibits != -1 && nvcibits != -1 && nvpibits + nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows * he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows * he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows * he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 * transmit connection memory map
	 *
	 * TSRa occupies 0x0-0x7fff, TSRb 0x8000-0xbfff, TSRc 0xc000-0xdfff,
	 * TSRd 0xe000-0xefff, tmABR 0xf000-0xffff, and tmTPD the remainder
	 * up to 0x1ffff; the CONFIG_* base addresses below encode this.
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 * receive connection memory map
	 *
	 * RSRa occupies 0x0-0x7fff, the rx0/rx1/tx local-buffer-memory
	 * link lists 0x8000-0xcfff, rmABR 0xd000-0xdfff, and RSRb from
	 * 0xe000 upward; the constants written below encode this.
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	he_init_cs_block_rcm(he_dev);

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0, SLAB_KERNEL);
#else
	/* 2.5.44+ dropped the gfp-flags argument from pci_pool_create */
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
#endif
	if (he_dev->tpd_pool == NULL) {
		hprintk1("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	/* only group 0 carries traffic; zero the queue registers of the rest.
	 * RBPS/RBPL registers are spaced 32 bytes per group, RBRQ/TBRQ 16. */
	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk1("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (0x2 << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

#ifndef USE_HE_FIND_VCC
	/* direct cid -> vcc lookup table, one slot per possible connection */
	he_dev->he_vcc_table = kmalloc(sizeof(struct he_vcc_table) *
		(1 << (he_dev->vcibits + he_dev->vpibits)), GFP_KERNEL);
	if (he_dev->he_vcc_table == NULL) {
		hprintk1("failed to alloc he_vcc_table\n");
		return -ENOMEM;
	}
	memset(he_dev->he_vcc_table, 0, sizeof(struct he_vcc_table) *
		(1 << (he_dev->vcibits + he_dev->vpibits)));
#endif

	/* pcr == -1 marks a constant-rate timer slot as free */
	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
/*
 * he_stop - shut down the adapter and release every resource he_start()
 * (and he_init_group()) may have acquired.
 *
 * Safe to call on a partially initialized device: each teardown step is
 * guarded by a check that the corresponding resource actually exists,
 * so this also serves as the error-cleanup path for he_start().
 *
 * Fix: the loops that drain the rbpl/rbps pci_pools use a loop index
 * `i` that was never declared, which is a compile error whenever
 * USE_RBPL_POOL or USE_RBPS_POOL is defined.  Declare it under the
 * matching preprocessor guard so the non-pool configurations do not
 * get an unused-variable warning.
 */
static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;
#if defined(USE_RBPL_POOL) || (defined(USE_RBPS) && defined(USE_RBPS_POOL))
	int i;
#endif

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq) {
#ifdef BUS_INT_WAR
		sn_delete_polled_interrupt(he_dev->irq);
#endif
		free_irq(he_dev->irq, he_dev);
	}

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		/* return each loaned large buffer to its pci_pool */
		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		/* return each loaned small buffer to its pci_pool */
		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		/* NOTE(review): size uses CONFIG_TBRQ_SIZE/he_tbrq, not a
		 * tpdrq-specific size -- verify against he_init_tpdrq()'s
		 * allocation before changing */
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
						he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

#ifndef USE_HE_FIND_VCC
	if (he_dev->he_vcc_table)
		kfree(he_dev->he_vcc_table);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap((void *) he_dev->membase);
}
/*
 * __alloc_tpd - get a free transmit packet descriptor.
 *
 * USE_TPD_POOL build: allocates from the tpd pci_pool (atomic, DMA-able),
 * records the descriptor's own bus address in its status word, and zeroes
 * the three iovec slots.
 *
 * non-pool build: scans the statically allocated tpd ring starting at
 * tpd_head for an entry not marked inuse, claims it, and resets it.
 *
 * Returns NULL when no descriptor is available.
 */
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	/* the bus address stored in status is how he_service_tbrq()
	 * matches completed descriptors back to this tpd */
	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	/* at most one full pass over the ring looking for a free slot */
	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			/* keep only the address bits; clear control bits */
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
/* AAL5 trailer: the pdu length lives in the last 8 bytes of the reassembled
 * buffer -- bytes len-6/len-5 hold the big-endian length field */
#define AAL5_LEN(buf,len) \
	((((unsigned char *)(buf))[(len)-6]<<8) | \
		(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) \
	((((unsigned char *)(buf))[(len)-2]<<8) | \
		(((unsigned char *)(buf))[(len-1)]))
/*
 * he_service_rbrq - drain the receive buffer return queue for one group.
 *
 * Walks the RBRQ from the driver's head to the hardware-reported tail
 * (read from the host status page), accumulating the returned buffers
 * of each connection into its he_vcc iovec list.  When an END_PDU entry
 * arrives with no errors, the fragments are copied into a freshly
 * charged sk_buff and pushed up the ATM stack; the host buffers are
 * then un-loaned so he_service_rbpl/rbps can hand them back to the
 * adapter.  Finally the new head index is written back to the chip.
 *
 * Returns the number of PDUs assembled (also counts error/close events
 * that consumed an iovec list).
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* the buffer address encodes which pool (small/large) and
		 * which slot within that pool the returned buffer came from */
#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;	/* length is in words */
		cid = RBRQ_CID(he_dev->rbrq_head);

#ifdef USE_HE_FIND_VCC
		/* cache the last lookup -- consecutive entries usually share a cid */
		if (cid != lastcid)
			vcc = he_find_vcc(he_dev, cid);
		lastcid = cid;
#else
		vcc = HE_LOOKUP_VCC(he_dev, cid);
#endif

		if (vcc == NULL) {
			hprintk("vcc == NULL (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
			++vcc->stats->rx_drop;
#else
			atomic_inc(&vcc->stats->rx_drop);
#endif
			goto return_host_buffers;
		}

		/* append this fragment to the connection's pending iovec list */
		he_vcc->iov_tail->iov_base = (void *) RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if (he_vcc->iov_tail - he_vcc->iov_head > 32) {
			hprintk("iovec full! cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
			++vcc->stats->rx_err;
#else
			atomic_inc(&vcc->stats->rx_err);
#endif
			goto return_host_buffers;
		}

		/* complete, valid pdu -- charge the socket and copy it out */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,15)
		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
#else
		if (!atm_charge(vcc, atm_pdu2truesize(he_vcc->pdu_len + rx_skb_reserve)))
			skb = NULL;
		else {
			skb = alloc_skb(he_vcc->pdu_len + rx_skb_reserve, GFP_ATOMIC);
			if (!skb)
				atm_return(vcc,
					atm_pdu2truesize(he_vcc->pdu_len + rx_skb_reserve));
		}
#endif
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		do_gettimeofday(&skb->stamp);

		/* gather the fragments out of the host buffer pools */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if ((u32)iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb->tail = skb->data + skb->len;
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_HW;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		vcc->push(vcc, skb);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
		++vcc->stats->rx;
#else
		atomic_inc(&vcc->stats->rx);
#endif

return_host_buffers:
		++pdus_assembled;

		/* clear RBP_LOANED on every fragment so the free-pool
		 * refillers can hand the buffers back to the adapter */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if ((u32)iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far we've consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
#ifdef CONFIG_IA64_SGI_SN2
		/* read back to flush the posted write on SN2 */
		(void) he_readl(he_dev, G0_RBRQ_H + (group * 16));
#endif
	}

	return pdus_assembled;
}
/*
 * he_service_tbrq - drain the transmit buffer return queue for one group.
 *
 * For each completed entry, locates the matching transmit descriptor
 * (by bus address on the outstanding list in the pool build, by index
 * otherwise), unmaps its DMA iovec segments, releases the transmitted
 * sk_buff via the vcc's pop callback (or frees it), returns the tpd,
 * and finally writes the new queue head back to the adapter.
 */
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct list_head *p;
#endif

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		/* match the returned bus address against outstanding tpds */
		tpd = NULL;
		p = &he_dev->outstanding_tpds;
		while ((p = p->next) != &he_dev->outstanding_tpds) {
			struct he_tpd *__tpd = list_entry(p, struct he_tpd, entry);
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			/* end-of-stream: a close is waiting on this queue */
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		/* unmap every dma segment up to and including the TPD_LST one */
		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;

		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
#ifdef CONFIG_IA64_SGI_SN2
		/* read back to flush the posted write on SN2 */
		(void) he_readl(he_dev, G0_TBRQ_H + (group * 16));
#endif
	}
}
/*
 * he_service_rbpl - replenish the large receive buffer pool.
 *
 * Advances the driver's tail pointer over every ring slot whose buffer
 * has been returned (RBP_LOANED clear), re-loaning each one to the
 * adapter, then publishes the new tail.  Stops before catching up to
 * the hardware head or on the first still-loaned slot.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *slot;
	struct he_rbp *hw_head;
	int loaned = 0;

	/* where the adapter currently is in the ring */
	hw_head = (struct he_rbp *) ((unsigned long) he_dev->rbpl_base |
				RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	while (1) {
		slot = (struct he_rbp *) ((unsigned long) he_dev->rbpl_base |
					RBPL_MASK(he_dev->rbpl_tail + 1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (slot == hw_head || (slot->status & RBP_LOANED))
			break;

		slot->status |= RBP_LOANED;
		he_dev->rbpl_tail = slot;
		++loaned;
	}

	if (loaned) {
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl(he_dev, G0_RBPL_T);
#endif
	}
}
#ifdef USE_RBPS
/*
 * he_service_rbps - replenish the small receive buffer pool.
 *
 * Mirror of he_service_rbpl() for the small-buffer ring: re-loan every
 * returned slot up to (but not reaching) the hardware head, then write
 * the new tail index to the adapter.
 */
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *slot;
	struct he_rbp *hw_head;
	int loaned = 0;

	/* where the adapter currently is in the ring */
	hw_head = (struct he_rbp *) ((unsigned long) he_dev->rbps_base |
				RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	while (1) {
		slot = (struct he_rbp *) ((unsigned long) he_dev->rbps_base |
					RBPS_MASK(he_dev->rbps_tail + 1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if (slot == hw_head || (slot->status & RBP_LOANED))
			break;

		slot->status |= RBP_LOANED;
		he_dev->rbps_tail = slot;
		++loaned;
	}

	if (loaned) {
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
#ifdef CONFIG_IA64_SGI_SN2
		(void) he_readl(he_dev, G0_RBPS_T);
#endif
	}
}
#endif /* USE_RBPS */
/*
 * he_tasklet - bottom-half interrupt service.
 *
 * Walks the interrupt status word queue from head to tail, dispatching
 * each event to the matching queue-service routine, invalidating the
 * consumed entry, and finally acknowledging progress by rewriting
 * IRQ0_HEAD with the current tail.  Runs under the global device lock
 * when built as a tasklet.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	HE_SPIN_LOCK(he_dev, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				hprintk("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* only refill the free pools if something was consumed */
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				hprintk("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* drop the lock: the phy handler may sleep or
				 * re-enter driver paths that take it */
				HE_SPIN_UNLOCK(he_dev, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				HE_SPIN_LOCK(he_dev, flags);
#endif
				HPRINTK1("phy interrupt\n");
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk1("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			default:
				if (he_dev->irq_head->isw == ITYPE_INVALID) {
					/* see 8.1.1 -- check all queues */
					HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
					he_service_rbrq(he_dev, 0);
					he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
					he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
					he_service_tbrq(he_dev, 0);
				} else
					hprintk("bad isw = 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark consumed so the "isw not updated" check above works */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}
#ifdef USE_TASKLET
	HE_SPIN_UNLOCK(he_dev, flags);
#endif
}
/*
 * Hard interrupt handler.
 *
 * Snapshots the adapter's interrupt queue tail (mirrored by the adapter
 * into *irq_tailoffset), falls back to reading IRQ0_BASE when the mirror
 * was not updated, and defers the actual event processing to he_tasklet
 * (scheduled, or called inline when !USE_TASKLET).
 *
 * The return type is irq_return_t/void depending on kernel version
 * (2.5.69 introduced interrupt-handler return values).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
static irq_return_t
#else
static void
#endif
he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
		return IRQ_NONE;
#else
		return;
#endif

	HE_SPIN_LOCK(he_dev, flags);

	/* tail offset is in 32-bit words; <<2 converts to a byte offset */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		/* mirror not updated -- read the tail from the adapter itself */
		HPRINTK1("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk1("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		/* no tasklet: process the queue right here, lock already held */
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
#ifdef CONFIG_IA64_SGI_SN2
		/* read back, presumably to flush the posted write -- TODO confirm */
		(void) he_readl(he_dev, INT_FIFO);
#endif
	}
	HE_SPIN_UNLOCK(he_dev, flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,69)
	return IRQ_RETVAL(handled);
#else
	return;
#endif
}
/*
 * Queue one transmit packet descriptor on the TPDRQ for connection `cid`.
 *
 * Caller must hold the device lock (all callers in this file --
 * he_send, he_close -- invoke this under HE_SPIN_LOCK).
 *
 * If the ring appears full, the head is re-read from the adapter
 * (TPDRQ_B_H) before giving up; on a genuinely full ring the pdu is
 * dropped, its skb popped/freed and tx_err counted (see FIXME below).
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
				++tpd->vcc->stats->tx_err;
#else
				atomic_inc(&tpd->vcc->stats->tx_err);
#endif
			}
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	/* no pool: dma address derived from the tpd's index in tpd_base */
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	/* descriptor contents must be visible before the tail is published */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
#ifdef CONFIG_IA64_SGI_SN2
	/* read back, presumably to flush the posted write -- TODO confirm */
	(void) he_readl(he_dev, TPDRQ_T);
#endif
}
/*
 * atmdev_ops open: bring up a vcc on this adapter.
 *
 * Resolves vpi/vci, allocates the per-connection he_vcc, then programs
 * the transmit side (TSR0..TSR14; UBR or CBR scheduling) and/or the
 * receive side (RSR0/1/4) according to the requested QoS.  Per 5.1.11,
 * the open/closed indication in rsr0 must be the last rx parameter
 * written.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error path only he_vcc is freed; HE_VCC(vcc) has
 * already been set and is left dangling -- presumably harmless because
 * the core never uses it after a failed open, but worth confirming.
 */
static int
he_open(struct atm_vcc *vcc, short vpi, int vci)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;

	if ((err = atm_find_ci(vcc, &vpi, &vci))) {
		HPRINTK("atm_find_ci err = %d\n", err);
		return err;
	}
	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;
	vcc->vpi = vpi;
	vcc->vci = vci;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	vcc->flags |= ATM_VF_ADDR;
#else
	set_bit(ATM_VF_ADDR, &vcc->flags);
#endif

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk1("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;	/* -1 = no CBR rate-controller slot assigned */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	init_waitqueue(&he_vcc->rx_waitq);
	init_waitqueue(&he_vcc->tx_waitq);
#else
	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);
#endif

	HE_VCC(vcc) = he_vcc;

	/* ---- transmit side ---- */
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		HE_SPIN_LOCK(he_dev, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		HE_SPIN_UNLOCK(he_dev, flags);

		/* the connection must be idle before we program it */
		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */
				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;
			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10)) {
					err = -EBUSY;
					goto open_failed;
				}

				HE_SPIN_LOCK(he_dev, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register (or one already at this pcr) */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					HE_SPIN_UNLOCK(he_dev, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				/* scheduler clock: 66.667MHz on 622 cards, 50MHz on 155 */
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n", reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2), CS_STPER0 + reg);
				HE_SPIN_UNLOCK(he_dev, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_RC_INDEX(reg);
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		HE_SPIN_LOCK(he_dev, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		/* zero the remaining per-connection transmit state */
		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
#ifdef CONFIG_IA64_SGI_SN2
		/* read back, presumably to flush the posted writes -- TODO confirm */
		(void) he_readl_tsr0(he_dev, cid);
#endif
		HE_SPIN_UNLOCK(he_dev, flags);
	}

	/* ---- receive side ---- */
	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		HE_SPIN_LOCK(he_dev, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			HE_SPIN_UNLOCK(he_dev, flags);
			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

#ifdef USE_RBPS
		rsr1 = RSR1_GROUP(0);
		rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
		/* early/partial packet discard only makes sense for ubr rx */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
#ifdef CONFIG_IA64_SGI_SN2
		/* read back, presumably to flush the posted writes -- TODO confirm */
		(void) he_readl_rsr0(he_dev, cid);
#endif
		HE_SPIN_UNLOCK(he_dev, flags);

#ifndef USE_HE_FIND_VCC
		HE_LOOKUP_VCC(he_dev, cid) = vcc;
#endif
	}

open_failed:

	if (err) {
		if (he_vcc)
			kfree(he_vcc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
		vcc->flags &= ~ATM_VF_ADDR;
#else
		clear_bit(ATM_VF_ADDR, &vcc->flags);
#endif
	}
	else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
		vcc->flags |= ATM_VF_READY;
#else
		set_bit(ATM_VF_READY, &vcc->flags);
#endif
	}

	return err;
}
/*
 * atmdev_ops close: tear down a vcc.
 *
 * Receive side (2.7.2.2): waits for any prior close to finish (RCC_BUSY),
 * issues RSR0_CLOSE_CONN plus the RXCON_CLOSE mailbox command, and sleeps
 * (up to 30s) until the close completion wakes rx_waitq.
 *
 * Transmit side (2.1.2): waits for outstanding packets to drain, flushes
 * the connection (TSR4_FLUSH_CONN), enqueues a final TPD_EOS descriptor
 * and sleeps (up to 30s) for its completion, then polls TSR4/TSR0 until
 * the session has fully ended.  CBR connections additionally release
 * their cs_stper rate-controller slot and bandwidth reservation.
 *
 * Fix versus the original: the tx-timeout path jumped to
 * close_tx_incomplete without holding the device lock, yet the code
 * following that label (cs_stper bookkeeping) ends in HE_SPIN_UNLOCK --
 * so that path unlocked a lock it did not hold (the tpd-allocation
 * failure path jumps there WITH the lock held).  We now re-acquire the
 * lock before taking the goto on timeout, keeping lock/unlock balanced
 * on every path.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,1)
	DECLARE_WAITQUEUE(wait, current);
#else
	struct wait_queue wait = { current, NULL };
#endif
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	vcc->flags &= ~ATM_VF_READY;
#else
	clear_bit(ATM_VF_READY, &vcc->flags);
#endif
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		HE_SPIN_LOCK(he_dev, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* queue ourselves before issuing the close so we cannot miss the wakeup */
		add_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
#ifdef CONFIG_IA64_SGI_SN2
		/* read back, presumably to flush the posted write -- TODO confirm */
		(void) he_readl_rsr0(he_dev, cid);
#endif
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		HE_SPIN_UNLOCK(he_dev, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

#ifndef USE_HE_FIND_VCC
		HE_LOOKUP_VCC(he_dev, cid) = NULL;
#endif

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* drain with exponential backoff, capped at MAX_RETRY attempts */
		while (((tx_inuse = atomic_read(&vcc->sk->wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			(void) schedule_timeout(sleep);
			set_current_state(TASK_RUNNING);
			if (sleep < HZ)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		HE_SPIN_LOCK(he_dev, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */
#ifdef CONFIG_IA64_SGI_SN2
		/* read back, presumably to flush the posted write -- TODO confirm */
		(void) he_readl_tsr4(he_dev, cid);
#endif

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;	/* lock still held */
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		add_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		__enqueue_tpd(he_dev, tpd, cid);
		HE_SPIN_UNLOCK(he_dev, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			/*
			 * re-take the lock: close_tx_incomplete ends with
			 * HE_SPIN_UNLOCK and the other goto reaches it with
			 * the lock held
			 */
			HE_SPIN_LOCK(he_dev, flags);
			goto close_tx_incomplete;
		}

		HE_SPIN_LOCK(he_dev, flags);

		/* poll until the adapter reports the session fully ended */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n",
								cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n",
								cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		HE_SPIN_UNLOCK(he_dev, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	vcc->flags &= ~ATM_VF_ADDR;
#else
	clear_bit(ATM_VF_ADDR, &vcc->flags);
#endif
}
/*
 * atmdev_ops sg_send: report whether this driver accepts a
 * scatter/gather submission of the given extent.  The answer is fixed
 * at compile time by USE_SCATTERGATHER; the arguments are unused.
 */
static int
he_sg_send(struct atm_vcc *vcc, unsigned long start, unsigned long size)
{
	int sg_capable;

#ifdef USE_SCATTERGATHER
	sg_capable = 1;
#else
	sg_capable = 0;
#endif
	return sg_capable;
}
static
int
he_send
(
struct
atm_vcc
*
vcc
,
struct
sk_buff
*
skb
)
{
unsigned
long
flags
;
struct
he_dev
*
he_dev
=
HE_DEV
(
vcc
->
dev
);
unsigned
cid
=
he_mkcid
(
he_dev
,
vcc
->
vpi
,
vcc
->
vci
);
struct
he_tpd
*
tpd
;
#ifdef USE_SCATTERGATHER
int
i
,
slot
=
0
;
#endif
#define HE_TPD_BUFSIZE 0xffff
HPRINTK
(
"send %d.%d
\n
"
,
vcc
->
vpi
,
vcc
->
vci
);
if
((
skb
->
len
>
HE_TPD_BUFSIZE
)
||
((
vcc
->
qos
.
aal
==
ATM_AAL0
)
&&
(
skb
->
len
!=
ATM_AAL0_SDU
)))
{
hprintk
(
"buffer too large (or small) -- %d bytes
\n
"
,
skb
->
len
);
if
(
vcc
->
pop
)
vcc
->
pop
(
vcc
,
skb
);
else
dev_kfree_skb_any
(
skb
);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++
vcc
->
stats
->
tx_err
;
#else
atomic_inc
(
&
vcc
->
stats
->
tx_err
);
#endif
return
-
EINVAL
;
}
#ifndef USE_SCATTERGATHER
if
(
skb_shinfo
(
skb
)
->
nr_frags
)
{
hprintk1
(
"no scatter/gather support
\n
"
);
if
(
vcc
->
pop
)
vcc
->
pop
(
vcc
,
skb
);
else
dev_kfree_skb_any
(
skb
);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++
vcc
->
stats
->
tx_err
;
#else
atomic_inc
(
&
vcc
->
stats
->
tx_err
);
#endif
return
-
EINVAL
;
}
#endif
HE_SPIN_LOCK
(
he_dev
,
flags
);
tpd
=
__alloc_tpd
(
he_dev
);
if
(
tpd
==
NULL
)
{
if
(
vcc
->
pop
)
vcc
->
pop
(
vcc
,
skb
);
else
dev_kfree_skb_any
(
skb
);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++
vcc
->
stats
->
tx_err
;
#else
atomic_inc
(
&
vcc
->
stats
->
tx_err
);
#endif
HE_SPIN_UNLOCK
(
he_dev
,
flags
);
return
-
ENOMEM
;
}
if
(
vcc
->
qos
.
aal
==
ATM_AAL5
)
tpd
->
status
|=
TPD_CELLTYPE
(
TPD_USERCELL
);
else
{
char
*
pti_clp
=
(
void
*
)
(
skb
->
data
+
3
);
int
clp
,
pti
;
pti
=
(
*
pti_clp
&
ATM_HDR_PTI_MASK
)
>>
ATM_HDR_PTI_SHIFT
;
clp
=
(
*
pti_clp
&
ATM_HDR_CLP
);
tpd
->
status
|=
TPD_CELLTYPE
(
pti
);
if
(
clp
)
tpd
->
status
|=
TPD_CLP
;
skb_pull
(
skb
,
ATM_AAL0_SDU
-
ATM_CELL_PAYLOAD
);
}
#ifdef USE_SCATTERGATHER
tpd
->
iovec
[
slot
].
addr
=
pci_map_single
(
he_dev
->
pci_dev
,
skb
->
data
,
skb
->
len
-
skb
->
data_len
,
PCI_DMA_TODEVICE
);
tpd
->
iovec
[
slot
].
len
=
skb
->
len
-
skb
->
data_len
;
++
slot
;
for
(
i
=
0
;
i
<
skb_shinfo
(
skb
)
->
nr_frags
;
i
++
)
{
skb_frag_t
*
frag
=
&
skb_shinfo
(
skb
)
->
frags
[
i
];
if
(
slot
==
TPD_MAXIOV
)
/* send tpd; start new tpd */
{
tpd
->
vcc
=
vcc
;
tpd
->
skb
=
NULL
;
/* not the last fragment
so dont ->push() yet */
wmb
();
__enqueue_tpd
(
he_dev
,
tpd
,
cid
);
tpd
=
__alloc_tpd
(
he_dev
);
if
(
tpd
==
NULL
)
{
if
(
vcc
->
pop
)
vcc
->
pop
(
vcc
,
skb
);
else
dev_kfree_skb_any
(
skb
);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++
vcc
->
stats
->
tx_err
;
#else
atomic_inc
(
&
vcc
->
stats
->
tx_err
);
HE_SPIN_UNLOCK
(
he_dev
,
flags
);
#endif
return
-
ENOMEM
;
}
tpd
->
status
|=
TPD_USERCELL
;
slot
=
0
;
}
tpd
->
iovec
[
slot
].
addr
=
pci_map_single
(
he_dev
->
pci_dev
,
(
void
*
)
page_address
(
frag
->
page
)
+
frag
->
page_offset
,
frag
->
size
,
PCI_DMA_TODEVICE
);
tpd
->
iovec
[
slot
].
len
=
frag
->
size
;
++
slot
;
}
tpd
->
iovec
[
slot
-
1
].
len
|=
TPD_LST
;
#else
tpd
->
address0
=
pci_map_single
(
he_dev
->
pci_dev
,
skb
->
data
,
skb
->
len
,
PCI_DMA_TODEVICE
);
tpd
->
length0
=
skb
->
len
|
TPD_LST
;
#endif
tpd
->
status
|=
TPD_INT
;
tpd
->
vcc
=
vcc
;
tpd
->
skb
=
skb
;
wmb
();
ATM_SKB
(
skb
)
->
vcc
=
vcc
;
__enqueue_tpd
(
he_dev
,
tpd
,
cid
);
HE_SPIN_UNLOCK
(
he_dev
,
flags
);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,99)
++
vcc
->
stats
->
tx
;
#else
atomic_inc
(
&
vcc
->
stats
->
tx
);
#endif
return
0
;
}
static
int
he_ioctl
(
struct
atm_dev
*
atm_dev
,
unsigned
int
cmd
,
void
*
arg
)
{
unsigned
long
flags
;
struct
he_dev
*
he_dev
=
HE_DEV
(
atm_dev
);
struct
he_ioctl_reg
reg
;
int
err
=
0
;
switch
(
cmd
)
{
case
HE_GET_REG
:
if
(
!
capable
(
CAP_NET_ADMIN
))
return
-
EPERM
;
copy_from_user
(
&
reg
,
(
struct
he_ioctl_reg
*
)
arg
,
sizeof
(
struct
he_ioctl_reg
));
HE_SPIN_LOCK
(
he_dev
,
flags
);
switch
(
reg
.
type
)
{
case
HE_REGTYPE_PCI
:
reg
.
val
=
he_readl
(
he_dev
,
reg
.
addr
);
break
;
case
HE_REGTYPE_RCM
:
reg
.
val
=
he_readl_rcm
(
he_dev
,
reg
.
addr
);
break
;
case
HE_REGTYPE_TCM
:
reg
.
val
=
he_readl_tcm
(
he_dev
,
reg
.
addr
);
break
;
case
HE_REGTYPE_MBOX
:
reg
.
val
=
he_readl_mbox
(
he_dev
,
reg
.
addr
);
break
;
default:
err
=
-
EINVAL
;
break
;
}
HE_SPIN_UNLOCK
(
he_dev
,
flags
);
if
(
err
==
0
)
copy_to_user
((
struct
he_ioctl_reg
*
)
arg
,
&
reg
,
sizeof
(
struct
he_ioctl_reg
));
break
;
default:
#ifdef CONFIG_ATM_HE_USE_SUNI
if
(
atm_dev
->
phy
&&
atm_dev
->
phy
->
ioctl
)
err
=
atm_dev
->
phy
->
ioctl
(
atm_dev
,
cmd
,
arg
);
#else
/* CONFIG_ATM_HE_USE_SUNI */
return
-
EINVAL
;
#endif
/* CONFIG_ATM_HE_USE_SUNI */
break
;
}
return
err
;
}
/*
 * atmdev_ops phy_put: write one PHY (framer) register through the
 * adapter's FRAMER window.  Registers are spaced 4 bytes apart, hence
 * the addr*4 scaling.  Serialized with the device lock.
 */
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	HE_SPIN_LOCK(he_dev, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
#ifdef CONFIG_IA64_SGI_SN2
	/* read back, presumably to flush the posted write -- TODO confirm */
	(void) he_readl(he_dev, FRAMER + (addr*4));
#endif
	HE_SPIN_UNLOCK(he_dev, flags);
}
/*
 * atmdev_ops phy_get: read one PHY (framer) register through the
 * adapter's FRAMER window (4-byte register spacing, hence addr*4).
 * Serialized with the device lock; returns the low byte of the
 * register value.
 */
static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	HE_SPIN_LOCK(he_dev, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	HE_SPIN_UNLOCK(he_dev, flags);

	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);

	return reg;
}
/*
 * /proc read handler: emits one line of driver statistics per call,
 * selected by *pos (the `!left--` ladder walks to the pos'th line).
 * Returns the number of bytes written into `page`, or 0 past the end.
 *
 * NOTE(review): the mcc/oec/dcc/cec accumulators are function-static,
 * so with more than one adapter they mix counts from every card --
 * per-device storage would be needed to report per-card totals.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* running totals of the adapter's clear-on-read error counters */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "%s\n", version);

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");

	/* accumulate the hardware counters under the device lock */
	HE_SPIN_LOCK(he_dev, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	HE_SPIN_UNLOCK(he_dev, flags);

	if (!left--)
		return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
/* eeprom routines -- see 4.7 */

/*
 * Read one byte from the serial EEPROM by bit-banging the HOST_CNTL
 * register: enable writes, clock out the READ opcode (readtab) and the
 * 8-bit address msb-first (two clock phases per bit, from clocktab),
 * then clock in 8 data bits, sampling ID_DOUT each cycle.  Finishes by
 * deasserting chip select (ID_CS).  Each register toggle is followed by
 * udelay(EEPROM_DELAY) to satisfy the part's timing.
 */
u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;	/* clear the eeprom control bits */

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		/* address bit is shifted into position 9; two clock phases per bit */
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		/* sample the data-out pin and place it at bit i (msb first) */
		byte_read |= (unsigned char)
				((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return (byte_read);
}
/* module metadata and load-time parameters */

MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
MODULE_PARM(disable64, "h");
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
MODULE_PARM(nvpibits, "i");
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
MODULE_PARM(nvcibits, "i");
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
MODULE_PARM(rx_skb_reserve, "i");
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
MODULE_PARM(irq_coalesce, "i");
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
MODULE_PARM(sdh, "i");
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
/*
 * Driver registration.  Kernels newer than 2.3.1 use the pci_driver
 * model (probe/remove via he_pci_tbl); older kernels fall back to
 * scanning the bus with pci_find_device and explicit module hooks.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,1)
static struct pci_device_id he_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, }		/* terminator */
};

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,9)
	.remove =	__devexit_p(he_remove_one),
#else
	.remove =	he_remove_one,
#endif
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_module_init(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);

#else

/* pre-2.3.1: probe every matching device on the bus ourselves */
static int __init
he_init()
{
	if (!pci_present())
		return -EIO;

#ifdef CONFIG_ATM_HE_USE_SUNI_MODULE
	/* request_module("suni"); */
#endif

	pci_dev = NULL;
	while ((pci_dev = pci_find_device(PCI_VENDOR_ID_FORE,
			PCI_DEVICE_ID_FORE_HE, pci_dev)) != NULL)
		if (he_init_one(pci_dev, NULL) == 0)
			++ndevs;

	return (ndevs ? 0 : -ENODEV);
}

static void __devexit
he_cleanup(void)
{
	/* tear down every probed device on the driver's list */
	while (he_devs) {
		struct he_dev *next = he_devs->next;

		he_stop(he_devs);
		atm_dev_deregister(he_devs->atm_dev);
		kfree(he_devs);
		he_devs = next;
	}
}

int init_module(void)
{
	return he_init();
}

void cleanup_module(void)
{
	he_cleanup();
}

#endif

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,7)
MODULE_LICENSE("GPL");
#endif
/* ==================== drivers/atm/he.h (new file, mode 100644) ==================== */
/* $Id: he.h,v 1.4 2003/05/06 22:48:00 chas Exp $ */
/*
he.h
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001 Naval Research Laboratory
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
he.h
ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2000 Naval Research Laboratory
Permission to use, copy, modify and distribute this software and its
documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the software,
derivative works or modified versions, and any portions thereof, and
that both notices appear in supporting documentation.
NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
RESULTING FROM THE USE OF THIS SOFTWARE.
*/
#ifndef _HE_H_
#define _HE_H_

#define DEV_LABEL	"he"

/* default vpi/vci address-space split (overridable via module params) */
#define CONFIG_DEFAULT_VCIBITS	12
#define CONFIG_DEFAULT_VPIBITS	0

/* interrupt event queue */
#define CONFIG_IRQ_SIZE		128
#define CONFIG_IRQ_THRESH	(CONFIG_IRQ_SIZE/2)

#define CONFIG_NUMTPDS		256

/* queue sizes and the masks that wrap a byte offset into each ring;
   the <<3 / <<2 shifts reflect the per-entry size in bytes */
#define CONFIG_TPDRQ_SIZE	512
#define TPDRQ_MASK(x)		(((unsigned long)(x))&((CONFIG_TPDRQ_SIZE<<3)-1))

#define CONFIG_RBRQ_SIZE	512
#define CONFIG_RBRQ_THRESH	400
#define RBRQ_MASK(x)		(((unsigned long)(x))&((CONFIG_RBRQ_SIZE<<3)-1))

#define CONFIG_TBRQ_SIZE	512
#define CONFIG_TBRQ_THRESH	400
#define TBRQ_MASK(x)		(((unsigned long)(x))&((CONFIG_TBRQ_SIZE<<2)-1))

/* large-buffer receive pool */
#define CONFIG_RBPL_SIZE	512
#define CONFIG_RBPL_THRESH	64
#define CONFIG_RBPL_BUFSIZE	4096
#define RBPL_MASK(x)		(((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1))

/* small-buffer receive pool */
#define CONFIG_RBPS_SIZE	1024
#define CONFIG_RBPS_THRESH	64
#define CONFIG_RBPS_BUFSIZE	128
#define RBPS_MASK(x)		(((unsigned long)(x))&((CONFIG_RBPS_SIZE<<3)-1))

/* 5.1.3 initialize connection memory */
#define CONFIG_RSRA		0x00000
#define CONFIG_RCMLBM		0x08000
#define CONFIG_RCMABR		0x0d800
#define CONFIG_RSRB		0x0e000

#define CONFIG_TSRA		0x00000
#define CONFIG_TSRB		0x08000
#define CONFIG_TSRC		0x0c000
#define CONFIG_TSRD		0x0e000
#define CONFIG_TMABR		0x0f000
#define CONFIG_TPDBA		0x10000

#define HE_MAXCIDBITS		12
/* 2.9.3.3 interrupt encodings */

/* one interrupt status word written by the adapter into the irq queue */
struct he_irq {
	volatile u32 isw;
};

#define IRQ_ALIGNMENT	0x1000

/* advance a ring pointer, wrapping it within the ring's address range */
#define NEXT_ENTRY(base, tail, mask) \
			(((unsigned long)base)|(((unsigned long)(tail+1))&mask))

/* event types carried in the isw: bits 7..3 = type, bits 2..0 = group */
#define ITYPE_INVALID		0xffffffff
#define ITYPE_TBRQ_THRESH	(0<<3)
#define ITYPE_TPD_COMPLETE	(1<<3)
#define ITYPE_RBPS_THRESH	(2<<3)
#define ITYPE_RBPL_THRESH	(3<<3)
#define ITYPE_RBRQ_THRESH	(4<<3)
#define ITYPE_RBRQ_TIMER	(5<<3)
#define ITYPE_PHY		(6<<3)
#define ITYPE_OTHER		0x80
#define ITYPE_PARITY		0x81
#define ITYPE_ABORT		0x82

#define ITYPE_GROUP(x)		(x & 0x7)
#define ITYPE_TYPE(x)		(x & 0xf8)

#define HE_NUM_GROUPS 8
/* 2.1.4 transmit packet descriptor */

/*
 * Transmit packet descriptor.  The leading fields form the layout the
 * adapter DMAs; the skb/vcc (and pool bookkeeping) fields are host-only
 * linux-atm extensions appended after the hardware part.
 */
struct he_tpd {

	/* read by the adapter */

	volatile u32 status;
	volatile u32 reserved;

#define TPD_MAXIOV	3
	struct {
		u32 addr, len;
	} iovec[TPD_MAXIOV];

/* aliases for the non-scatter/gather single-buffer case */
#define address0 iovec[0].addr
#define length0 iovec[0].len

	/* linux-atm extensions */

	struct sk_buff *skb;
	struct atm_vcc *vcc;

#ifdef USE_TPD_POOL
	struct list_head entry;
#else
	u32 inuse;
	/* pad the host part to a fixed size */
	char padding[32 - sizeof(u32) - (2*sizeof(void*))];
#endif
};

#define TPD_ALIGNMENT	64
#define TPD_LEN_MASK	0xffff

/* a tpd's dma address doubles as its identifier: low 6 bits are free */
#define TPD_ADDR_SHIFT	6
#define TPD_MASK	0xffffffc0
#define TPD_ADDR(x)	((x) & TPD_MASK)
#define TPD_INDEX(x)	(TPD_ADDR(x) >> TPD_ADDR_SHIFT)
/* table 2.3 transmit buffer return elements */

/* one transmit-buffer-return entry: tpd address plus status bits */
struct he_tbrq {
	volatile u32 tbre;
};

#define TBRQ_ALIGNMENT	CONFIG_TBRQ_SIZE

#define TBRQ_TPD(tbrq)		((tbrq)->tbre & 0xffffffc0)
#define TBRQ_EOS(tbrq)		((tbrq)->tbre & (1<<3))
#define TBRQ_MULTIPLE(tbrq)	((tbrq)->tbre & (1))

/* table 2.21 receive buffer return queue element field organization */

/* one receive-buffer-return entry: buffer address/status + cid/length */
struct he_rbrq {
	volatile u32 addr;
	volatile u32 cidlen;
};

#define RBRQ_ALIGNMENT	CONFIG_RBRQ_SIZE

/* status flags live in the low bits of the 64-byte-aligned address */
#define RBRQ_ADDR(rbrq)		((rbrq)->addr & 0xffffffc0)
#define RBRQ_CRC_ERR(rbrq)	((rbrq)->addr & (1<<5))
#define RBRQ_LEN_ERR(rbrq)	((rbrq)->addr & (1<<4))
#define RBRQ_END_PDU(rbrq)	((rbrq)->addr & (1<<3))
#define RBRQ_AAL5_PROT(rbrq)	((rbrq)->addr & (1<<2))
#define RBRQ_CON_CLOSED(rbrq)	((rbrq)->addr & (1<<1))
#define RBRQ_HBUF_ERR(rbrq)	((rbrq)->addr & 1)
#define RBRQ_CID(rbrq)		(((rbrq)->cidlen >> 16) & 0x1fff)
#define RBRQ_BUFLEN(rbrq)	((rbrq)->cidlen & 0xffff)
/* figure 2.3 transmit packet descriptor ready queue
 *
 * Host-to-adapter queue: each entry names a TPD (by bus address) and the
 * connection id it should be transmitted on.
 */
struct he_tpdrq {
	volatile u32 tpd;
	volatile u32 cid;
};

#define TPDRQ_ALIGNMENT CONFIG_TPDRQ_SIZE
/* table 2.30 host status page detail
 *
 * DMA'd by the adapter: per-group tail pointers for the transmit and
 * receive buffer return queues.  The reserved words space the tails so
 * that each live field sits in its own 64-byte region.
 */
#define HSP_ALIGNMENT 0x400		/* must align on 1k boundary */

struct he_hsp {
	struct he_hsp_entry {
		volatile u32 tbrq_tail;
		volatile u32 reserved1[15];
		volatile u32 rbrq_tail;
		volatile u32 reserved2[15];
	} group[HE_NUM_GROUPS];
};
/* figure 2.9 receive buffer pools
 *
 * Entry the adapter reads to claim a host receive buffer: its bus
 * address, plus a status word (see the NOTE below for how it is used).
 */
struct he_rbp {
	volatile u32 phys;
	volatile u32 status;
};

/* NOTE: it is suggested that virt be the virtual address of the host
   buffer. on a 64-bit machine, this would not work. Instead, we
   store the real virtual address in another list, and store an index
   (and buffer status) in the virt member.
*/
#define RBP_INDEX_OFF 6
#define RBP_INDEX(x) (((long)(x) >> RBP_INDEX_OFF) & 0xffff)
#define RBP_LOANED 0x80000000	/* buffer currently owned by the adapter */
#define RBP_SMALLBUF 0x40000000	/* buffer from the small pool */

/* side table holding the real virtual address for each pool entry
 * (see NOTE above) */
struct he_virt {
	void *virt;
};

#define RBPL_ALIGNMENT CONFIG_RBPL_SIZE
#define RBPS_ALIGNMENT CONFIG_RBPS_SIZE
#ifdef notyet
struct
he_group
{
u32
rpbs_size
,
rpbs_qsize
;
struct
he_rbp
rbps_ba
;
u32
rpbl_size
,
rpbl_qsize
;
struct
he_rpb_entry
*
rbpl_ba
;
};
#endif
#define HE_LOOKUP_VCC(dev, cid) ((dev)->he_vcc_table[(cid)].vcc)
struct
he_vcc_table
{
struct
atm_vcc
*
vcc
;
};
struct
he_cs_stper
{
long
pcr
;
int
inuse
;
};
#define HE_NUM_CS_STPER 16
/* Per-adapter state for one ForeRunner HE board. */
struct he_dev {
	unsigned int number;		/* adapter instance number */
	unsigned int irq;
	unsigned long membase;		/* mapped PCI register window */

	/* identity read from the serial EEPROM */
	char prod_id[30];
	char mac_addr[6];
	int media;
	/*
	 * 0x26 = HE155 MM
	 * 0x27 = HE622 MM
	 * 0x46 = HE155 SM
	 * 0x47 = HE622 SM
	 */

	unsigned int vcibits, vpibits;

	/* local (on-board) buffer memory geometry */
	unsigned int cells_per_row;
	unsigned int bytes_per_row;
	unsigned int cells_per_lbuf;
	unsigned int r0_numrows, r0_startrow, r0_numbuffs;
	unsigned int r1_numrows, r1_startrow, r1_numbuffs;
	unsigned int tx_numrows, tx_startrow, tx_numbuffs;
	unsigned int buffer_limit;

	struct he_vcc_table *he_vcc_table;	/* cid -> vcc lookup */

#ifdef notyet
	struct he_group group[HE_NUM_GROUPS];
#endif
	/* CBR rate-table slots and total allocated bandwidth */
	struct he_cs_stper cs_stper[HE_NUM_CS_STPER];
	unsigned total_bw;

	/* interrupt queue */
	dma_addr_t irq_phys;
	struct he_irq *irq_base, *irq_head, *irq_tail;
	volatile unsigned *irq_tailoffset;
	int irq_peak;

#ifdef USE_TASKLET
	struct tasklet_struct tasklet;
#endif

	/* transmit packet descriptors */
#ifdef USE_TPD_POOL
	struct pci_pool *tpd_pool;
	struct list_head outstanding_tpds;
#else
	struct he_tpd *tpd_head, *tpd_base, *tpd_end;
	dma_addr_t tpd_base_phys;
#endif

	/* transmit packet descriptor ready queue */
	dma_addr_t tpdrq_phys;
	struct he_tpdrq *tpdrq_base, *tpdrq_tail, *tpdrq_head;

	spinlock_t global_lock;		/* 8.1.5 pci transaction ordering
					   error problem */

	/* receive buffer return queue */
	dma_addr_t rbrq_phys;
	struct he_rbrq *rbrq_base, *rbrq_head;
	int rbrq_peak;

	/* large receive buffer pool */
#ifdef USE_RBPL_POOL
	struct pci_pool *rbpl_pool;
#else
	void *rbpl_pages;
	dma_addr_t rbpl_pages_phys;
#endif
	dma_addr_t rbpl_phys;
	struct he_rbp *rbpl_base, *rbpl_tail;
	struct he_virt *rbpl_virt;
	int rbpl_peak;

	/* small receive buffer pool */
#ifdef USE_RBPS
#ifdef USE_RBPS_POOL
	struct pci_pool *rbps_pool;
#else
	void *rbps_pages;
	dma_addr_t rbps_pages_phys;
#endif
#endif
	dma_addr_t rbps_phys;
	struct he_rbp *rbps_base, *rbps_tail;
	struct he_virt *rbps_virt;
	int rbps_peak;

	/* transmit buffer return queue */
	dma_addr_t tbrq_phys;
	struct he_tbrq *tbrq_base, *tbrq_head;
	int tbrq_peak;

	/* host status page (adapter writes queue tails here) */
	dma_addr_t hsp_phys;
	struct he_hsp *hsp;

	struct pci_dev *pci_dev;
	struct atm_dev *atm_dev;

	struct he_dev *next;		/* singly linked list of adapters */
};
/* Per-VCC state: reassembly scatter list for the PDU being received,
 * plus wait queues used when closing the rx/tx sides.
 *
 * Fix: the pre-2.3.1 branch declared tx_waitq with "atruct", a typo for
 * "struct", which cannot compile when that branch is selected.
 */
struct he_vcc {
	struct iovec iov_head[32];	/* buffers of the PDU being assembled */
	struct iovec *iov_tail;		/* next free slot in iov_head */
	int pdu_len;			/* bytes accumulated so far */
	int rc_index;			/* CBR rate-table slot (cs_stper index) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,1)
	struct wait_queue *rx_waitq;
	struct wait_queue *tx_waitq;	/* was "atruct wait_queue *" */
#else
	wait_queue_head_t rx_waitq;
	wait_queue_head_t tx_waitq;
#endif
};
#define HE_VCC(vcc) ((struct he_vcc *)(vcc->dev_data))

#define PCI_VENDOR_ID_FORE 0x1127
#define PCI_DEVICE_ID_FORE_HE 0x400

#define HE_DMA_MASK 0xffffffff

/* PCI config register: general control 0 */
#define GEN_CNTL_0 0x40
#define INT_PROC_ENBL (1<<25)
#define SLAVE_ENDIAN_MODE (1<<16)
#define MRL_ENB (1<<5)
#define MRM_ENB (1<<4)
#define INIT_ENB (1<<2)
#define IGNORE_TIMEOUT (1<<1)
#define ENBL_64 (1<<0)

#define MIN_PCI_LATENCY 32	/* errata 8.1.3 */

#define HE_DEV(dev) ((struct he_dev *) (dev)->dev_data)

#define he_is622(dev) ((dev)->media & 0x1)

/* memory-mapped register window */
#define HE_REGMAP_SIZE 0x100000

#define RESET_CNTL 0x80000
#define BOARD_RST_STATUS (1<<6)

#define HOST_CNTL 0x80004
#define PCI_BUS_SIZE64 (1<<27)
#define DESC_RD_STATIC_64 (1<<26)
#define DATA_RD_STATIC_64 (1<<25)
#define DATA_WR_STATIC_64 (1<<24)
/* ID_* bits bit-bang the serial EEPROM (see readtab/clocktab below) */
#define ID_CS (1<<12)
#define ID_WREN (1<<11)
#define ID_DOUT (1<<10)
#define ID_DOFFSET 10
#define ID_DIN (1<<9)
#define ID_CLOCK (1<<8)
#define QUICK_RD_RETRY (1<<7)
#define QUICK_WR_RETRY (1<<6)
#define OUTFF_ENB (1<<5)
#define CMDFF_ENB (1<<4)
#define PERR_INT_ENB (1<<2)
#define IGNORE_INTR (1<<0)

#define LB_SWAP 0x80008
#define SWAP_RNUM_MAX(x) (x<<27)
#define DATA_WR_SWAP (1<<20)
#define DESC_RD_SWAP (1<<19)
#define DATA_RD_SWAP (1<<18)
#define INTR_SWAP (1<<17)
#define DESC_WR_SWAP (1<<16)
#define SDRAM_INIT (1<<15)
#define BIG_ENDIAN_HOST (1<<14)
#define XFER_SIZE (1<<7)

/* indirect access to local (on-board) buffer memory */
#define LB_MEM_ADDR 0x8000c
#define LB_MEM_DATA 0x80010
#define LB_MEM_ACCESS 0x80014
#define LB_MEM_HNDSHK (1<<30)
#define LM_MEM_WRITE (0x7)
#define LM_MEM_READ (0x3)

#define SDRAM_CTL 0x80018
#define LB_64_ENB (1<<3)
#define LB_TWR (1<<2)
#define LB_TRP (1<<1)
#define LB_TRAS (1<<0)

#define INT_FIFO 0x8001c
#define INT_MASK_D (1<<15)
#define INT_MASK_C (1<<14)
#define INT_MASK_B (1<<13)
#define INT_MASK_A (1<<12)
#define INT_CLEAR_D (1<<11)
#define INT_CLEAR_C (1<<10)
#define INT_CLEAR_B (1<<9)
#define INT_CLEAR_A (1<<8)

#define ABORT_ADDR 0x80020

/* interrupt queue 0 */
#define IRQ0_BASE 0x80080
#define IRQ_BASE(x) (x<<12)
#define IRQ_MASK ((CONFIG_IRQ_SIZE<<2)-1)	/* was 0x3ff */
#define IRQ_TAIL(x) (((unsigned long)(x)) & IRQ_MASK)
#define IRQ0_HEAD 0x80084
#define IRQ_SIZE(x) (x<<22)
#define IRQ_THRESH(x) (x<<12)
#define IRQ_HEAD(x) (x<<2)
/* #define IRQ_PENDING (1) conflict with linux/irq.h */
#define IRQ0_CNTL 0x80088
#define IRQ_ADDRSEL(x) (x<<2)
#define IRQ_INT_A (0<<2)
#define IRQ_INT_B (1<<2)
#define IRQ_INT_C (2<<2)
#define IRQ_INT_D (3<<2)
#define IRQ_TYPE_ADDR 0x1
#define IRQ_TYPE_LINE 0x0
#define IRQ0_DATA 0x8008c

/* interrupt queues 1..3 (same layout as queue 0) */
#define IRQ1_BASE 0x80090
#define IRQ1_HEAD 0x80094
#define IRQ1_CNTL 0x80098
#define IRQ1_DATA 0x8009c

#define IRQ2_BASE 0x800a0
#define IRQ2_HEAD 0x800a4
#define IRQ2_CNTL 0x800a8
#define IRQ2_DATA 0x800ac

#define IRQ3_BASE 0x800b0
#define IRQ3_HEAD 0x800b4
#define IRQ3_CNTL 0x800b8
#define IRQ3_DATA 0x800bc

/* group -> interrupt queue mapping */
#define GRP_10_MAP 0x800c0
#define GRP_32_MAP 0x800c4
#define GRP_54_MAP 0x800c8
#define GRP_76_MAP 0x800cc

/* per-group receive buffer pool registers: _S start, _T tail,
 * _QI queue info, _BS buffer size; RBPS = small pool, RBPL = large pool */
#define G0_RBPS_S 0x80400
#define G0_RBPS_T 0x80404
#define RBP_TAIL(x) ((x)<<3)
#define RBP_MASK(x) ((x)|0x1fff)
#define G0_RBPS_QI 0x80408
#define RBP_QSIZE(x) ((x)<<14)
#define RBP_INT_ENB (1<<13)
#define RBP_THRESH(x) (x)
#define G0_RBPS_BS 0x8040c
#define G0_RBPL_S 0x80410
#define G0_RBPL_T 0x80414
#define G0_RBPL_QI 0x80418
#define G0_RBPL_BS 0x8041c

#define G1_RBPS_S 0x80420
#define G1_RBPS_T 0x80424
#define G1_RBPS_QI 0x80428
#define G1_RBPS_BS 0x8042c
#define G1_RBPL_S 0x80430
#define G1_RBPL_T 0x80434
#define G1_RBPL_QI 0x80438
#define G1_RBPL_BS 0x8043c

#define G2_RBPS_S 0x80440
#define G2_RBPS_T 0x80444
#define G2_RBPS_QI 0x80448
#define G2_RBPS_BS 0x8044c
#define G2_RBPL_S 0x80450
#define G2_RBPL_T 0x80454
#define G2_RBPL_QI 0x80458
#define G2_RBPL_BS 0x8045c

#define G3_RBPS_S 0x80460
#define G3_RBPS_T 0x80464
#define G3_RBPS_QI 0x80468
#define G3_RBPS_BS 0x8046c
#define G3_RBPL_S 0x80470
#define G3_RBPL_T 0x80474
#define G3_RBPL_QI 0x80478
#define G3_RBPL_BS 0x8047c

#define G4_RBPS_S 0x80480
#define G4_RBPS_T 0x80484
#define G4_RBPS_QI 0x80488
#define G4_RBPS_BS 0x8048c
#define G4_RBPL_S 0x80490
#define G4_RBPL_T 0x80494
#define G4_RBPL_QI 0x80498
#define G4_RBPL_BS 0x8049c

#define G5_RBPS_S 0x804a0
#define G5_RBPS_T 0x804a4
#define G5_RBPS_QI 0x804a8
#define G5_RBPS_BS 0x804ac
#define G5_RBPL_S 0x804b0
#define G5_RBPL_T 0x804b4
#define G5_RBPL_QI 0x804b8
#define G5_RBPL_BS 0x804bc

#define G6_RBPS_S 0x804c0
#define G6_RBPS_T 0x804c4
#define G6_RBPS_QI 0x804c8
#define G6_RBPS_BS 0x804cc
#define G6_RBPL_S 0x804d0
#define G6_RBPL_T 0x804d4
#define G6_RBPL_QI 0x804d8
#define G6_RBPL_BS 0x804dc

#define G7_RBPS_S 0x804e0
#define G7_RBPS_T 0x804e4
#define G7_RBPS_QI 0x804e8
#define G7_RBPS_BS 0x804ec
#define G7_RBPL_S 0x804f0
#define G7_RBPL_T 0x804f4
#define G7_RBPL_QI 0x804f8
#define G7_RBPL_BS 0x804fc

/* group 0 receive buffer return queue */
#define G0_RBRQ_ST 0x80500
#define G0_RBRQ_H 0x80504
#define G0_RBRQ_Q 0x80508
#define RBRQ_THRESH(x) ((x)<<13)
#define RBRQ_SIZE(x) (x)
#define G0_RBRQ_I 0x8050c
#define RBRQ_TIME(x) ((x)<<8)
#define RBRQ_COUNT(x) (x)
/* fill in 1 ... 7 later */

/* group 0 transmit buffer return queue */
#define G0_TBRQ_B_T 0x80600
#define G0_TBRQ_H 0x80604
#define G0_TBRQ_S 0x80608
#define G0_TBRQ_THRESH 0x8060c
#define TBRQ_THRESH(x) (x)
/* fill in 1 ... 7 later */
#define RH_CONFIG 0x805c0
#define PHY_INT_ENB (1<<10)
#define OAM_GID(x) (x<<7)
#define PTMR_PRE(x) (x)

/* per-group incoming message queues */
#define G0_INMQ_S 0x80580
#define G0_INMQ_L 0x80584
#define G1_INMQ_S 0x80588
#define G1_INMQ_L 0x8058c
#define G2_INMQ_S 0x80590
#define G2_INMQ_L 0x80594
#define G3_INMQ_S 0x80598
#define G3_INMQ_L 0x8059c
#define G4_INMQ_S 0x805a0
#define G4_INMQ_L 0x805a4
#define G5_INMQ_S 0x805a8
#define G5_INMQ_L 0x805ac
#define G6_INMQ_S 0x805b0
#define G6_INMQ_L 0x805b4
#define G7_INMQ_S 0x805b8
#define G7_INMQ_L 0x805bc

/* transmit packet descriptor ready queue */
#define TPDRQ_B_H 0x80680
#define TPDRQ_T 0x80684
#define TPDRQ_S 0x80688

#define UBUFF_BA 0x8068c

/* local buffer free lists and counters */
#define RLBF0_H 0x806c0
#define RLBF0_T 0x806c4
#define RLBF1_H 0x806c8
#define RLBF1_T 0x806cc
#define RLBC_H 0x806d0
#define RLBC_T 0x806d4
#define RLBC_H2 0x806d8
#define TLBF_H 0x806e0
#define TLBF_T 0x806e4
#define RLBF0_C 0x806e8
#define RLBF1_C 0x806ec
#define RXTHRSH 0x806f0
#define LITHRSH 0x806f4

/* local bus arbitration */
#define LBARB 0x80700
#define SLICE_X(x) (x<<28)
#define ARB_RNUM_MAX(x) (x<<23)
#define TH_PRTY(x) (x<<21)
#define RH_PRTY(x) (x<<19)
#define TL_PRTY(x) (x<<17)
#define RL_PRTY(x) (x<<15)
#define BUS_MULTI(x) (x<<8)
#define NET_PREF(x) (x)

#define SDRAMCON 0x80704
#define BANK_ON (1<<14)
#define WIDE_DATA (1<<13)
#define TWR_WAIT (1<<12)
#define TRP_WAIT (1<<11)
#define TRAS_WAIT (1<<10)
#define REF_RATE(x) (x)

#define LBSTAT 0x80708

#define RCC_STAT 0x8070c
#define RCC_BUSY (1)

/* transmit connection memory configuration */
#define TCMCONFIG 0x80740
#define TM_DESL2 (1<<10)
#define TM_BANK_WAIT(x) (x<<6)
#define TM_ADD_BANK4(x) (x<<4)
#define TM_PAR_CHECK(x) (x<<3)
#define TM_RW_WAIT(x) (x<<2)
#define TM_SRAM_TYPE(x) (x)

#define TSRB_BA 0x80744
#define TSRC_BA 0x80748
#define TMABR_BA 0x8074c
#define TPD_BA 0x80750
#define TSRD_BA 0x80758

#define TX_CONFIG 0x80760
#define DRF_THRESH(x) (x<<22)
#define TX_UT_MODE(x) (x<<21)
#define TX_VCI_MASK(x) (x<<17)
#define LBFREE_CNT(x) (x)

#define TXAAL5_PROTO 0x80764
#define CPCS_UU(x) (x<<8)
#define CPI(x) (x)

/* receive connection memory configuration */
#define RCMCONFIG 0x80780
#define RM_DESL2(x) (x<<10)
#define RM_BANK_WAIT(x) (x<<6)
#define RM_ADD_BANK(x) (x<<4)
#define RM_PAR_CHECK(x) (x<<3)
#define RM_RW_WAIT(x) (x<<2)
#define RM_SRAM_TYPE(x) (x)

#define RCMRSRB_BA 0x80784
#define RCMLBM_BA 0x80788
#define RCMABR_BA 0x8078c

#define RC_CONFIG 0x807c0
#define UT_RD_DELAY(x) (x<<11)
#define WRAP_MODE(x) (x<<10)
#define RC_UT_MODE(x) (x<<9)
#define RX_ENABLE (1<<8)
#define RX_VALVP(x) (x<<4)
#define RX_VALVC(x) (x)

/* cell counters */
#define MCC 0x807c4
#define OEC 0x807c8
#define DCC 0x807cc
#define CEC 0x807d0

#define HSP_BA 0x807f0

#define LB_CONFIG 0x807f4
#define LB_SIZE(x) (x)

/* indirect access to connection memory / mailbox */
#define CON_DAT 0x807f8
#define CON_CTL 0x807fc
#define CON_CTL_MBOX (2<<30)
#define CON_CTL_TCM (1<<30)
#define CON_CTL_RCM (0<<30)
#define CON_CTL_WRITE (1<<29)
#define CON_CTL_READ (0<<29)
#define CON_CTL_BUSY (1<<28)
#define CON_BYTE_DISABLE_3 (1<<22)	/* 24..31 */
#define CON_BYTE_DISABLE_2 (1<<21)	/* 16..23 */
#define CON_BYTE_DISABLE_1 (1<<20)	/* 8..15 */
#define CON_BYTE_DISABLE_0 (1<<19)	/* 0..7 */
#define CON_CTL_ADDR(x) (x)

#define FRAMER 0x80800	/* to 0x80bfc */
/* 3.3 network controller (internal) mailbox registers */

#define CS_STPER0 0x0
/* ... */
#define CS_STPER31 0x01f

#define CS_STTIM0 0x020
/* ... */
#define CS_STTIM31 0x03f

#define CS_TGRLD0 0x040
/* ... */
#define CS_TGRLD15 0x04f

#define CS_ERTHR0 0x050
#define CS_ERTHR1 0x051
#define CS_ERTHR2 0x052
#define CS_ERTHR3 0x053
#define CS_ERTHR4 0x054
#define CS_ERCTL0 0x055
#define TX_ENABLE (1<<28)
#define ER_ENABLE (1<<27)
#define CS_ERCTL1 0x056
#define CS_ERCTL2 0x057
#define CS_ERSTAT0 0x058
#define CS_ERSTAT1 0x059

#define CS_RTCCT 0x060
#define CS_RTFWC 0x061
#define CS_RTFWR 0x062
#define CS_RTFTC 0x063
#define CS_RTATR 0x064

#define CS_TFBSET 0x070
#define CS_TFBADD 0x071
#define CS_TFBSUB 0x072
#define CS_WCRMAX 0x073
#define CS_WCRMIN 0x074
#define CS_WCRINC 0x075
#define CS_WCRDEC 0x076
#define CS_WCRCEIL 0x077
#define CS_BWDCNT 0x078

#define CS_OTPPER 0x080
#define CS_OTWPER 0x081
#define CS_OTTLIM 0x082
#define CS_OTTCNT 0x083

#define CS_HGRRT0 0x090
/* ... */
#define CS_HGRRT7 0x097

#define CS_ORPTRS 0x0a0

#define RXCON_CLOSE 0x100

#define RCM_MEM_SIZE 0x10000	/* 1M of 32-bit registers */
#define TCM_MEM_SIZE 0x20000	/* 2M of 32-bit registers */

/* 2.5 transmit connection memory registers */

#define TSR0_CONN_STATE(x) ((x>>28) & 0x7)
#define TSR0_USE_WMIN (1<<23)
#define TSR0_GROUP(x) ((x & 0x7)<<18)
#define TSR0_ABR (2<<16)
#define TSR0_UBR (1<<16)
#define TSR0_CBR (0<<16)
#define TSR0_PROT (1<<15)
#define TSR0_AAL0_SDU (2<<12)
#define TSR0_AAL0 (1<<12)
#define TSR0_AAL5 (0<<12)
#define TSR0_HALT_ER (1<<11)
#define TSR0_MARK_CI (1<<10)
#define TSR0_MARK_ER (1<<9)
#define TSR0_UPDATE_GER (1<<8)
#define TSR0_RC_INDEX(x) (x & 0x1F)

#define TSR1_PCR(x) ((x & 0x7FFF)<<16)
#define TSR1_MCR(x) (x & 0x7FFF)

#define TSR2_ACR(x) ((x & 0x7FFF)<<16)

#define TSR3_NRM_CNT(x) ((x & 0xFF)<<24)
#define TSR3_CRM_CNT(x) (x & 0xFFFF)

#define TSR4_FLUSH_CONN (1<<31)
#define TSR4_SESSION_ENDED (1<<30)
#define TSR4_CRC10 (1<<28)
#define TSR4_NULL_CRC10 (1<<27)
#define TSR4_PROT (1<<26)
#define TSR4_AAL0_SDU (2<<23)
#define TSR4_AAL0 (1<<23)
#define TSR4_AAL5 (0<<23)

#define TSR9_OPEN_CONN (1<<20)

#define TSR11_ICR(x) ((x & 0x7FFF)<<16)
#define TSR11_TRM(x) ((x & 0x7)<<13)
#define TSR11_NRM(x) ((x & 0x7)<<10)
#define TSR11_ADTF(x) (x & 0x3FF)

#define TSR13_RDF(x) ((x & 0xF)<<23)
#define TSR13_RIF(x) ((x & 0xF)<<19)
#define TSR13_CDF(x) ((x & 0x7)<<16)
#define TSR13_CRM(x) (x & 0xFFFF)

#define TSR14_DELETE (1<<31)
#define TSR14_ABR_CLOSE (1<<16)
/* 2.7.1 per connection receive state registers */

#define RSR0_START_PDU (1<<10)
#define RSR0_OPEN_CONN (1<<6)
#define RSR0_CLOSE_CONN (0<<6)
#define RSR0_PPD_ENABLE (1<<5)
#define RSR0_EPD_ENABLE (1<<4)
#define RSR0_TCP_CKSUM (1<<3)
#define RSR0_AAL5 (0)
#define RSR0_AAL0 (1)
#define RSR0_AAL0_SDU (2)
#define RSR0_RAWCELL (3)
#define RSR0_RAWCELL_CRC10 (4)

#define RSR1_AQI_ENABLE (1<<20)
#define RSR1_RBPL_ONLY (1<<19)
#define RSR1_GROUP(x) ((x)<<16)

#define RSR4_AQI_ENABLE (1<<30)
#define RSR4_GROUP(x) ((x)<<27)
#define RSR4_RBPL_ONLY (1<<26)

/* 2.1.4 transmit packet descriptor: cell type and flag bits for
 * the TPD status word */
#define TPD_USERCELL 0x0
#define TPD_SEGMENT_OAMF5 0x4
#define TPD_END2END_OAMF5 0x5
#define TPD_RMCELL 0x6
#define TPD_CELLTYPE(x) (x<<3)
#define TPD_EOS (1<<2)
#define TPD_CLP (1<<1)
#define TPD_INT (1<<0)
#define TPD_LST (1<<31)
/* table 4.3 serial eeprom information
 *
 * Byte offsets of the fields stored in the on-board serial EEPROM.
 */
#define PROD_ID 0x08	/* char[] */
#define PROD_ID_LEN 30
#define HW_REV 0x26	/* char[] */
#define M_SN 0x3a	/* integer */
#define MEDIA 0x3e	/* integer */
/* NOTE(review): these media values disagree with the he_dev.media
 * comment (0x27 = HE622 MM, 0x46 = HE155 SM there) and with
 * he_is622(), which treats odd values as 622 — verify which pairing
 * is correct before relying on the SM/MM names below. */
#define HE155MM 0x26
#define HE155SM 0x27
#define HE622MM 0x46
#define HE622SM 0x47
#define MAC_ADDR 0x42	/* char[] */

/* EEPROM bit-bang levels, expressed as HOST_CNTL bits */
#define CS_LOW 0x0
#define CS_HIGH ID_CS		/* HOST_CNTL_ID_PROM_SEL */
#define CLK_LOW 0x0
#define CLK_HIGH ID_CLOCK	/* HOST_CNTL_ID_PROM_CLOCK */
#define SI_HIGH ID_DIN		/* HOST_CNTL_ID_PROM_DATA_IN */
#define EEPROM_DELAY 400	/* microseconds */
/* Read from EEPROM = 0000 0011b
 *
 * Sequence of HOST_CNTL bit patterns that clocks the READ opcode
 * (0000 0011b, MSB first) into the serial EEPROM: chip select, then one
 * CLK_LOW/CLK_HIGH pair per command bit with SI_HIGH for the 1 bits.
 *
 * NOTE(review): defined (not just declared) and non-static in what
 * appears to be a header — safe only while a single .c file includes
 * it; confirm, or consider making it static.
 */
unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
/* Clock to read from/write to the EEPROM
 *
 * Eight CLK_LOW/CLK_HIGH pairs (one per data bit) followed by a final
 * CLK_LOW to leave the clock line low.
 *
 * NOTE(review): like readtab above, a non-static definition in a
 * header — acceptable only with a single includer.
 */
unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
#endif
/* _HE_H_ */
include/linux/atm_he.h
0 → 100644
View file @
15f4cd09
/* atm_he.h */
#ifndef LINUX_ATM_HE_H
#define LINUX_ATM_HE_H
#include <linux/atmioc.h>
#define HE_GET_REG _IOW('a', ATMIOC_SARPRV, struct atmif_sioc)
#define HE_REGTYPE_PCI 1
#define HE_REGTYPE_RCM 2
#define HE_REGTYPE_TCM 3
#define HE_REGTYPE_MBOX 4
struct
he_ioctl_reg
{
unsigned
addr
,
val
;
char
type
;
};
#endif
/* LINUX_ATM_HE_H */
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment