Kirill Smelkov / linux

Commit a6a66e46, authored Mar 23, 2004 by David Mosberger

    Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5
    into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5

Parents: db80df6e, e4c4e244

Showing 19 changed files with 196 additions and 448 deletions:
arch/ia64/Kconfig                        +0   -50
arch/ia64/hp/common/sba_iommu.c          +8   -0
arch/ia64/kernel/gate.S                  +1   -1
arch/ia64/kernel/machvec.c               +21  -2
arch/ia64/kernel/process.c               +13  -4
arch/ia64/kernel/setup.c                 +37  -32
arch/ia64/kernel/vmlinux.lds.S           +9   -0
arch/ia64/lib/swiotlb.c                  +7   -0
arch/ia64/pci/pci.c                      +2   -1
arch/ia64/sn/io/machvec/pci_dma.c        +7   -0
arch/ia64/sn/io/sn2/shub.c               +37  -255
arch/ia64/sn/kernel/mca.c                +1   -1
arch/ia64/sn/kernel/sn2/sn_proc_fs.c     +7   -50
drivers/char/sn_serial.c                 +9   -9
include/asm-ia64/dma-mapping.h           +2   -1
include/asm-ia64/machvec.h               +20  -36
include/asm-ia64/machvec_hpzx1.h         +8   -6
include/asm-ia64/machvec_sn2.h           +2   -0
include/asm-ia64/sn/sndrv.h              +5   -0

arch/ia64/Kconfig

@@ -214,15 +214,6 @@ config FORCE_MAX_ZONEORDER
 	int
 	default "18"
 
-config IA64_PAL_IDLE
-	bool "Use PAL_HALT_LIGHT in idle loop"
-	help
-	  Say Y here to enable use of PAL_HALT_LIGHT in the cpu_idle loop.
-	  This allows the CPU to enter a low power state when idle. You
-	  can enable CONFIG_IA64_PALINFO and check /proc/pal/cpu0/power_info
-	  to see the power consumption and latency for this state. If you're
-	  unsure your firmware supports it, answer N.
-
 config SMP
 	bool "Symmetric multi-processing support"
 	help

@@ -344,47 +335,6 @@ config ACPI
 	bool
 	depends on !IA64_HP_SIM
 	default y
-	help
-	  ACPI/OSPM support for Linux is currently under development. As such,
-	  this support is preliminary and EXPERIMENTAL. Configuring ACPI
-	  support enables kernel interfaces that allow higher level software
-	  (OSPM) to manipulate ACPI defined hardware and software interfaces,
-	  including the evaluation of ACPI control methods. If unsure, choose
-	  N here. Note, this option will enlarge your kernel by about 120K.
-
-	  This support requires an ACPI compliant platform (hardware/firmware).
-	  If both ACPI and Advanced Power Management (APM) support are
-	  configured, whichever is loaded first shall be used.
-
-	  This code DOES NOT currently provide a complete OSPM implementation
-	  -- it has not yet reached APM's level of functionality. When fully
-	  implemented, Linux ACPI/OSPM will provide a more robust functional
-	  replacement for legacy configuration and power management
-	  interfaces, including the Plug-and-Play BIOS specification (PnP
-	  BIOS), the Multi-Processor Specification (MPS), and the Advanced
-	  Power Management specification (APM).
-
-	  Linux support for ACPI/OSPM is based on Intel Corporation's ACPI
-	  Component Architecture (ACPI CA). The latest ACPI CA source code,
-	  documentation, debug builds, and implementation status information
-	  can be downloaded from:
-	  <http://developer.intel.com/technology/iapc/acpi/downloads.htm>.
-
-	  The ACPI Sourceforge project may also be of interest:
-	  <http://sf.net/projects/acpi/>
-
-config ACPI_INTERPRETER
-	bool
-	depends on !IA64_HP_SIM
-	default y
-
-config ACPI_KERNEL_CONFIG
-	bool
-	depends on !IA64_HP_SIM
-	default y
-	help
-	  If you say `Y' here, Linux's ACPI support will use the
-	  hardware-level system descriptions found on IA-64 systems.
 
 if !IA64_HP_SIM

arch/ia64/hp/common/sba_iommu.c

@@ -1678,6 +1678,7 @@ struct ioc_iommu {
 static struct ioc_iommu ioc_iommu_info[] __initdata = {
 	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
+	{ SX1000_IOC_ID, "sx1000", NULL },
 };
 
 static struct ioc * __init

@@ -1979,6 +1980,12 @@ sba_dma_supported (struct device *dev, u64 mask)
 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
+int
+sba_dma_mapping_error (dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 __setup("nosbagart", nosbagart);
 
 static int __init

@@ -2004,6 +2011,7 @@ sba_page_override(char *str)
 __setup("sbapagesize=", sba_page_override);
 
+EXPORT_SYMBOL(sba_dma_mapping_error);
 EXPORT_SYMBOL(sba_map_single);
 EXPORT_SYMBOL(sba_unmap_single);
 EXPORT_SYMBOL(sba_map_sg);

arch/ia64/kernel/gate.S

@@ -234,7 +234,7 @@ back_from_setup_rbs:
 	br.call.sptk.many rp=b6			// call the signal handler
 .ret0:	adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
 	;;
-	ld8 r15=[base0],(CFM_OFF-BSP_OFF)	// fetch sc_ar_bsp and advance to CFM_OFF
+	ld8 r15=[base0]				// fetch sc_ar_bsp
 	mov r14=ar.bsp
 	;;
 	cmp.ne p1,p0=r14,r15			// do we need to restore the rbs?

arch/ia64/kernel/machvec.c

@@ -50,8 +50,27 @@ machvec_noop (void)
 EXPORT_SYMBOL(machvec_noop);
 
 void
-machvec_memory_fence (void)
+machvec_setup (char **arg)
 {
 }
+EXPORT_SYMBOL(machvec_setup);
+
+void
+machvec_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+}
+EXPORT_SYMBOL(machvec_timer_interrupt);
+
+void
+machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+{
+	mb();
+}
+EXPORT_SYMBOL(machvec_dma_sync_single);
+
+void
+machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+{
+	mb();
+}
-EXPORT_SYMBOL(machvec_memory_fence);
+EXPORT_SYMBOL(machvec_dma_sync_sg);

arch/ia64/kernel/process.c

@@ -159,16 +159,25 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
 	ia64_do_signal(oldset, scr, in_syscall);
 }
 
+static int pal_halt = 1;
+static int __init nohalt_setup(char * str)
+{
+	pal_halt = 0;
+	return 1;
+}
+__setup("nohalt", nohalt_setup);
+
 /*
  * We use this if we don't have any better idle routine..
  */
 void
 default_idle (void)
 {
-#ifdef CONFIG_IA64_PAL_IDLE
-	if (!need_resched())
-		safe_halt();
-#endif
+	unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
+
+	while (!need_resched())
+		if (pal_halt && !pmu_active)
+			safe_halt();
 }
 
 void __attribute__((noreturn))

arch/ia64/kernel/setup.c

@@ -229,6 +229,38 @@ find_initrd (void)
 #endif
 }
 
+static void __init
+io_port_init (void)
+{
+	extern unsigned long ia64_iobase;
+	unsigned long phys_iobase;
+
+	/*
+	 *  Set `iobase' to the appropriate address in region 6 (uncached access range).
+	 *
+	 *  The EFI memory map is the "preferred" location to get the I/O port space base,
+	 *  rather the relying on AR.KR0. This should become more clear in future SAL
+	 *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
+	 *  found in the memory map.
+	 */
+	phys_iobase = efi_get_iobase();
+	if (phys_iobase)
+		/* set AR.KR0 since this is all we use it for anyway */
+		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
+	else {
+		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
+		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
+		       "to AR.KR0\n");
+		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+	}
+	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+
+	/* setup legacy IO port space */
+	io_space[0].mmio_base = ia64_iobase;
+	io_space[0].sparse = 1;
+	num_io_spaces = 1;
+}
+
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 static void __init
 setup_serial_legacy (void)

@@ -251,9 +283,6 @@ setup_serial_legacy (void)
 void __init
 setup_arch (char **cmdline_p)
 {
-	extern unsigned long ia64_iobase;
-	unsigned long phys_iobase;
-
 	unw_init();
 
 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

@@ -262,6 +291,11 @@ setup_arch (char **cmdline_p)
 	strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
 
 	efi_init();
+	io_port_init();
+
+#ifdef CONFIG_IA64_GENERIC
+	machvec_init(acpi_get_sysname());
+#endif
 
 #ifdef CONFIG_ACPI_BOOT
 	/* Initialize the ACPI boot-time table parser */

@@ -280,35 +314,6 @@ setup_arch (char **cmdline_p)
 	/* process SAL system table: */
 	ia64_sal_init(efi.sal_systab);
 
-#ifdef CONFIG_IA64_GENERIC
-	machvec_init(acpi_get_sysname());
-#endif
-
-	/*
-	 *  Set `iobase' to the appropriate address in region 6 (uncached access range).
-	 *
-	 *  The EFI memory map is the "preferred" location to get the I/O port space base,
-	 *  rather the relying on AR.KR0. This should become more clear in future SAL
-	 *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
-	 *  found in the memory map.
-	 */
-	phys_iobase = efi_get_iobase();
-	if (phys_iobase)
-		/* set AR.KR0 since this is all we use it for anyway */
-		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
-	else {
-		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
-		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
-		       "to AR.KR0\n");
-		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
-	}
-	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
-
-	/* setup legacy IO port space */
-	io_space[0].mmio_base = ia64_iobase;
-	io_space[0].sparse = 1;
-	num_io_spaces = 1;
-
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
 #endif

arch/ia64/kernel/vmlinux.lds.S

@@ -12,6 +12,11 @@ OUTPUT_FORMAT("elf64-ia64-little")
 OUTPUT_ARCH(ia64)
 ENTRY(phys_start)
 jiffies = jiffies_64;
+PHDRS {
+  code   PT_LOAD;
+  percpu PT_LOAD;
+  data   PT_LOAD;
+}
 SECTIONS
 {
   /* Sections to be discarded */

@@ -26,6 +31,7 @@ SECTIONS
   v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
   phys_start = _start - LOAD_OFFSET;
 
+  code : { } :code
   . = KERNEL_START;
 
   _text = .;

@@ -173,6 +179,7 @@ SECTIONS
 	{ *(.data.cacheline_aligned) }
 
   /* Per-cpu data: */
+  percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
   __phys_per_cpu_start = .;
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)

@@ -183,6 +190,7 @@ SECTIONS
     }
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits into percpu page size */
 
+  data : { } :data
  .data : AT(ADDR(.data) - LOAD_OFFSET)
	{ *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }

@@ -205,6 +213,7 @@ SECTIONS
   _end = .;
 
+  code : { } :code
   /* Stabs debugging sections.  */
   .stab 0 : { *(.stab) }
   .stabstr 0 : { *(.stabstr) }

arch/ia64/lib/swiotlb.c

@@ -498,6 +498,12 @@ swiotlb_sync_sg_for_device (struct device *hwdev, struct scatterlist *sg, int ne
 			sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
 }
 
+int
+swiotlb_dma_mapping_error (dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 /*
  * Return whether the given PCI device DMA address mask can be supported properly.  For
  * example, if your device can only drive the low 24-bits during PCI bus mastering, then

@@ -518,6 +524,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
 EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);

arch/ia64/pci/pci.c

@@ -128,7 +128,8 @@ struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;	/* default to SAL < 3.2 */
 static int __init
 pci_set_sal_ops (void)
 {
-	if (sal_version >= SAL_VERSION_CODE(3, 2)) {
+	if (sal_revision >= SAL_VERSION_CODE(3, 2)) {
+		printk("Using SAL 3.2 to access PCI config space\n");
 		raw_pci_ops = &pci_sal_ext_ops;
 	}
 	return 0;

arch/ia64/sn/io/machvec/pci_dma.c

@@ -652,6 +652,13 @@ sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems
 }
 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
+int
+sn_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+EXPORT_SYMBOL(sn_dma_mapping_error);
 EXPORT_SYMBOL(sn_pci_unmap_single);
 EXPORT_SYMBOL(sn_pci_map_single);
 EXPORT_SYMBOL(sn_pci_dma_sync_single_for_cpu);

arch/ia64/sn/io/sn2/shub.c

@@ -160,11 +160,13 @@ static int
 shubstats_ioctl(struct inode *inode, struct file *file,
 		unsigned int cmd, unsigned long arg)
 {
-	cnodeid_t	cnode;
-	uint64_t	longarg;
+	cnodeid_t	cnode;
+	uint64_t	longarg;
+	uint64_t	intarg;
+	uint64_t	regval[2];
 	int nasid;
 
-	cnode = (cnodeid_t)file->f_dentry->d_fsdata;
+	cnode = (cnodeid_t)(u64)file->f_dentry->d_fsdata;
 	if (cnode < 0 || cnode >= numnodes)
 		return -ENODEV;

@@ -200,6 +202,38 @@ shubstats_ioctl(struct inode *inode, struct file *file,
 		}
 		break;
 
+	case SNDRV_SHUB_GETMMR32:
+		intarg = shub_mmr_read32(cnode, arg);
+		if (copy_to_user((void *)arg, &intarg, sizeof(intarg))) {
+			return -EFAULT;
+		}
+		break;
+
+	case SNDRV_SHUB_GETMMR64:
+	case SNDRV_SHUB_GETMMR64_IO:
+		if (cmd == SNDRV_SHUB_GETMMR64)
+			longarg = shub_mmr_read(cnode, arg);
+		else
+			longarg = shub_mmr_read_iospace(cnode, arg);
+		if (copy_to_user((void *)arg, &longarg, sizeof(longarg)))
+			return -EFAULT;
+		break;
+
+	case SNDRV_SHUB_PUTMMR64:
+	case SNDRV_SHUB_PUTMMR64_IO:
+		if (copy_from_user((void *)regval, (void *)arg, sizeof(regval)))
+			return -EFAULT;
+		if (regval[0] & 0x7) {
+			printk("Error: configure_shub_stats: unaligned address 0x%016lx\n",
+			       regval[0]);
+			return -EINVAL;
+		}
+		if (cmd == SNDRV_SHUB_PUTMMR64)
+			shub_mmr_write(cnode, (shubreg_t)regval[0], regval[1]);
+		else
+			shub_mmr_write_iospace(cnode, (shubreg_t)regval[0], regval[1]);
+		break;
+
 	default:
 		return -EINVAL;
 	}

@@ -210,255 +244,3 @@ shubstats_ioctl(struct inode *inode, struct file *file,
 struct file_operations shub_mon_fops = {
 	.ioctl = shubstats_ioctl,
 };
-
-/*
- * "linkstatd" kernel thread to export SGI Numalink
- * stats via /proc/sgi_sn/linkstats
- */
-static struct s_linkstats {
-	uint64_t	hs_ni_sn_errors[2];
-	uint64_t	hs_ni_cb_errors[2];
-	uint64_t	hs_ni_retry_errors[2];
-	int		hs_ii_up;
-	uint64_t	hs_ii_sn_errors;
-	uint64_t	hs_ii_cb_errors;
-	uint64_t	hs_ii_retry_errors;
-} *sn_linkstats;
-
-static spinlock_t    sn_linkstats_lock;
-static unsigned long sn_linkstats_starttime;
-static unsigned long sn_linkstats_samples;
-static unsigned long sn_linkstats_overflows;
-static unsigned long sn_linkstats_update_msecs;
-
-void
-sn_linkstats_reset(unsigned long msecs)
-{
-	int		cnode;
-	uint64_t	iio_wstat;
-	uint64_t	llp_csr_reg;
-
-	spin_lock(&sn_linkstats_lock);
-
-	memset(sn_linkstats, 0, numnodes * sizeof(struct s_linkstats));
-	for (cnode = 0; cnode < numnodes; cnode++) {
-		shub_mmr_write(cnode, SH_NI0_LLP_ERR, 0L);
-		shub_mmr_write(cnode, SH_NI1_LLP_ERR, 0L);
-		shub_mmr_write_iospace(cnode, IIO_LLP_LOG, 0L);
-
-		/* zero the II retry counter */
-		iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
-		iio_wstat &= 0xffffffffff00ffff;	/* bits 23:16 */
-		shub_mmr_write_iospace(cnode, IIO_WSTAT, iio_wstat);
-
-		/* Check if the II xtalk link is working */
-		llp_csr_reg = shub_mmr_read_iospace(cnode, IIO_LLP_CSR);
-		if (llp_csr_reg & IIO_LLP_CSR_IS_UP)
-			sn_linkstats[cnode].hs_ii_up = 1;
-	}
-
-	sn_linkstats_update_msecs = msecs;
-	sn_linkstats_samples = 0;
-	sn_linkstats_overflows = 0;
-	sn_linkstats_starttime = jiffies;
-
-	spin_unlock(&sn_linkstats_lock);
-}
-
-int
-linkstatd_thread(void *unused)
-{
-	int		cnode;
-	int		overflows;
-	uint64_t	reg[2];
-	uint64_t	iio_wstat = 0L;
-	ii_illr_u_t	illr;
-	struct s_linkstats *lsp;
-	struct task_struct *tsk = current;
-
-	daemonize("linkstatd");
-	set_user_nice(tsk, 19);
-	sigfillset(&tsk->blocked);
-	strcpy(tsk->comm, "linkstatd");
-
-	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(sn_linkstats_update_msecs * HZ / 1000);
-
-		spin_lock(&sn_linkstats_lock);
-
-		overflows = 0;
-		for (lsp = sn_linkstats, cnode = 0; cnode < numnodes; cnode++, lsp++) {
-			reg[0] = shub_mmr_read(cnode, SH_NI0_LLP_ERR);
-			reg[1] = shub_mmr_read(cnode, SH_NI1_LLP_ERR);
-
-			if (lsp->hs_ii_up) {
-				illr = (ii_illr_u_t)shub_mmr_read_iospace(cnode, IIO_LLP_LOG);
-				iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
-			}
-
-			if (!overflows &&
-			    ((reg[0] & SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK) ==
-					SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK ||
-			     (reg[0] & SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK) ==
-					SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK ||
-			     (reg[1] & SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK) ==
-					SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK ||
-			     (reg[1] & SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK) ==
-					SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK ||
-			     (lsp->hs_ii_up && illr.ii_illr_fld_s.i_sn_cnt == IIO_LLP_SN_MAX) ||
-			     (lsp->hs_ii_up && illr.ii_illr_fld_s.i_cb_cnt == IIO_LLP_CB_MAX))) {
-				overflows = 1;
-			}
-
-#define LINKSTAT_UPDATE(reg, cnt, mask, shift) cnt += (reg & mask) >> shift
-
-			LINKSTAT_UPDATE(reg[0], lsp->hs_ni_sn_errors[0],
-					SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK,
-					SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_SHFT);
-
-			LINKSTAT_UPDATE(reg[1], lsp->hs_ni_sn_errors[1],
-					SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK,
-					SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_SHFT);
-
-			LINKSTAT_UPDATE(reg[0], lsp->hs_ni_cb_errors[0],
-					SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK,
-					SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_SHFT);
-
-			LINKSTAT_UPDATE(reg[1], lsp->hs_ni_cb_errors[1],
-					SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK,
-					SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_SHFT);
-
-			LINKSTAT_UPDATE(reg[0], lsp->hs_ni_retry_errors[0],
-					SH_NI0_LLP_ERR_RETRY_COUNT_MASK,
-					SH_NI0_LLP_ERR_RETRY_COUNT_SHFT);
-
-			LINKSTAT_UPDATE(reg[1], lsp->hs_ni_retry_errors[1],
-					SH_NI1_LLP_ERR_RETRY_COUNT_MASK,
-					SH_NI1_LLP_ERR_RETRY_COUNT_SHFT);
-
-			if (lsp->hs_ii_up) {
-				/* II sn and cb errors */
-				lsp->hs_ii_sn_errors += illr.ii_illr_fld_s.i_sn_cnt;
-				lsp->hs_ii_cb_errors += illr.ii_illr_fld_s.i_cb_cnt;
-				lsp->hs_ii_retry_errors += (iio_wstat & 0x0000000000ff0000) >> 16;
-
-				shub_mmr_write(cnode, SH_NI0_LLP_ERR, 0L);
-				shub_mmr_write(cnode, SH_NI1_LLP_ERR, 0L);
-				shub_mmr_write_iospace(cnode, IIO_LLP_LOG, 0L);
-
-				/* zero the II retry counter */
-				iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
-				iio_wstat &= 0xffffffffff00ffff;	/* bits 23:16 */
-				shub_mmr_write_iospace(cnode, IIO_WSTAT, iio_wstat);
-			}
-		}
-
-		sn_linkstats_samples++;
-		if (overflows)
-			sn_linkstats_overflows++;
-
-		spin_unlock(&sn_linkstats_lock);
-	}
-}
-
-static char *
-rate_per_minute(uint64_t val, uint64_t secs)
-{
-	static char	buf[16];
-	uint64_t	a = 0, b = 0, c = 0, d = 0;
-
-	if (secs) {
-		a = 60 * val / secs;
-		b = 60 * 10 * val / secs - (10 * a);
-		c = 60 * 100 * val / secs - (100 * a) - (10 * b);
-		d = 60 * 1000 * val / secs - (1000 * a) - (100 * b) - (10 * c);
-	}
-	sprintf(buf, "%4lu.%lu%lu%lu", a, b, c, d);
-
-	return buf;
-}
-
-int
-sn_linkstats_get(char *page)
-{
-	int		n = 0;
-	int		cnode;
-	int		nlport;
-	struct s_linkstats *lsp;
-	nodepda_t	*npda;
-	uint64_t	snsum = 0;
-	uint64_t	cbsum = 0;
-	uint64_t	retrysum = 0;
-	uint64_t	snsum_ii = 0;
-	uint64_t	cbsum_ii = 0;
-	uint64_t	retrysum_ii = 0;
-	uint64_t	secs;
-
-	spin_lock(&sn_linkstats_lock);
-
-	secs = (jiffies - sn_linkstats_starttime) / HZ;
-
-	n += sprintf(page, "# SGI Numalink stats v1 : %lu samples, %lu o/flows, update %lu msecs\n",
-		     sn_linkstats_samples, sn_linkstats_overflows, sn_linkstats_update_msecs);
-
-	n += sprintf(page + n, "%-37s %8s %8s %8s %8s\n",
-		     "# Numalink", "sn errs", "cb errs", "cb/min", "retries");
-
-	for (lsp = sn_linkstats, cnode = 0; cnode < numnodes; cnode++, lsp++) {
-		npda = NODEPDA(cnode);
-
-		/* two NL links on each SHub */
-		for (nlport = 0; nlport < 2; nlport++) {
-			cbsum += lsp->hs_ni_cb_errors[nlport];
-			snsum += lsp->hs_ni_sn_errors[nlport];
-			retrysum += lsp->hs_ni_retry_errors[nlport];
-
-			/* avoid buffer overrun (should be using seq_read API) */
-			if (numnodes > 64)
-				continue;
-
-			n += sprintf(page + n, "/%s/link/%d %8lu %8lu %8s %8lu\n",
-				     npda->hwg_node_name, nlport + 1,
-				     lsp->hs_ni_sn_errors[nlport],
-				     lsp->hs_ni_cb_errors[nlport],
-				     rate_per_minute(lsp->hs_ni_cb_errors[nlport], secs),
-				     lsp->hs_ni_retry_errors[nlport]);
-		}
-
-		/* one II port on each SHub (may not be connected) */
-		if (lsp->hs_ii_up) {
-			n += sprintf(page + n, "/%s/xtalk %8lu %8lu %8s %8lu\n",
-				     npda->hwg_node_name,
-				     lsp->hs_ii_sn_errors,
-				     lsp->hs_ii_cb_errors,
-				     rate_per_minute(lsp->hs_ii_cb_errors, secs),
-				     lsp->hs_ii_retry_errors);
-
-			snsum_ii += lsp->hs_ii_sn_errors;
-			cbsum_ii += lsp->hs_ii_cb_errors;
-			retrysum_ii += lsp->hs_ii_retry_errors;
-		}
-	}
-
-	n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
-		     "System wide NL totals", snsum, cbsum,
-		     rate_per_minute(cbsum, secs), retrysum);
-
-	n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
-		     "System wide II totals", snsum_ii, cbsum_ii,
-		     rate_per_minute(cbsum_ii, secs), retrysum_ii);
-
-	spin_unlock(&sn_linkstats_lock);
-
-	return n;
-}
-
-static int __init
-linkstatd_init(void)
-{
-	if (!ia64_platform_is("sn2"))
-		return -ENODEV;
-
-	spin_lock_init(&sn_linkstats_lock);
-	sn_linkstats = kmalloc(numnodes * sizeof(struct s_linkstats), GFP_KERNEL);
-	sn_linkstats_reset(60000UL);	/* default 60 second update interval */
-
-	kernel_thread(linkstatd_thread, NULL, CLONE_KERNEL);
-
-	return 0;
-}
-
-__initcall(linkstatd_init);

arch/ia64/sn/kernel/mca.c

@@ -108,6 +108,7 @@ sn_platform_plat_specific_err_print(const u8 *sect_header, u8 **oemdata, u64 *oe
 	down(&sn_oemdata_mutex);
 	sn_oemdata = oemdata;
 	sn_oemdata_size = oemdata_size;
+	sn_oemdata_bufsize = 0;
 	ia64_sn_plat_specific_err_print(print_hook, (char *) psei);
 	up(&sn_oemdata_mutex);
 	return 0;

@@ -120,7 +121,6 @@ int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdat
 {
 	efi_guid_t guid = *(efi_guid_t *)sect_header;
 	*oemdata_size = 0;
-	sn_oemdata_bufsize = 0;
 	vfree(*oemdata);
 	*oemdata = NULL;
 	if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0)

arch/ia64/sn/kernel/sn2/sn_proc_fs.c

@@ -73,8 +73,13 @@ register_sn_serial_numbers(void) {
 	}
 }
 
-// Disable forced interrupts, but leave the code in, just in case.
-int sn_force_interrupt_flag = 0;
+/*
+ * Enable forced interrupt by default.
+ * When set, the sn interrupt handler writes the force interrupt register on
+ * the bridge chip.  The hardware will then send an interrupt message if the
+ * interrupt line is active.  This mimics a level sensitive interrupt.
+ */
+int sn_force_interrupt_flag = 1;
 
 static int sn_force_interrupt_read_proc(char *page, char **start, off_t off,

@@ -113,59 +118,11 @@ register_sn_force_interrupt(void) {
 	}
 }
 
-extern int sn_linkstats_get(char *);
-extern int sn_linkstats_reset(unsigned long);
-
-static int
-sn_linkstats_read_proc(char *page, char **start, off_t off,
-		int count, int *eof, void *data) {
-
-	return sn_linkstats_get(page);
-}
-
-static int
-sn_linkstats_write_proc(struct file *file, const char *buffer,
-		unsigned long count, void *data)
-{
-	char		s[64];
-	unsigned long	msecs;
-	int		e = count;
-
-	if (copy_from_user(s, buffer, count < sizeof(s) ? count : sizeof(s)))
-		e = -EFAULT;
-	else {
-		if (sscanf(s, "%lu", &msecs) != 1 || msecs < 5)
-			/* at least 5 milliseconds between updates */
-			e = -EINVAL;
-		else
-			sn_linkstats_reset(msecs);
-	}
-
-	return e;
-}
-
-void register_sn_linkstats(void) {
-	struct proc_dir_entry *entry;
-
-	if (!sgi_proc_dir) {
-		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
-	}
-	entry = create_proc_entry("linkstats", 0444, sgi_proc_dir);
-	if (entry) {
-		entry->nlink = 1;
-		entry->data = 0;
-		entry->read_proc = sn_linkstats_read_proc;
-		entry->write_proc = sn_linkstats_write_proc;
-	}
-}
-
 void register_sn_procfs(void) {
 	register_sn_partition_id();
 	register_sn_serial_numbers();
 	register_sn_force_interrupt();
-	register_sn_linkstats();
 }
 
 #endif /* CONFIG_PROC_FS */

drivers/char/sn_serial.c

@@ -352,7 +352,7 @@ synch_flush_xmit(void)
 	if (xmit_count > 0) {
 		result = sn_func->sal_puts((char *)start, xmit_count);
 		if (!result)
-			sn_debug_printf("\n*** synch_flush_xmit failed to flush\n");
+			DPRINTF("\n*** synch_flush_xmit failed to flush\n");
 		if (result > 0) {
 			xmit_count -= result;
 			sn_total_tx_count += result;

@@ -389,12 +389,12 @@ sn_poll_transmit_chars(void)
 		xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
 
 	if (xmit_count == 0)
-		sn_debug_printf("\n*** empty xmit_count\n");
+		DPRINTF("\n*** empty xmit_count\n");
 
 	/* use the ops, as we could be on the simulator */
 	result = sn_func->sal_puts((char *)start, xmit_count);
 	if (!result)
-		sn_debug_printf("\n*** error in synchronous sal_puts\n");
+		DPRINTF("\n*** error in synchronous sal_puts\n");
 	/* XXX chadt clean this up */
 	if (result > 0) {
 		xmit_count -= result;

@@ -447,7 +447,7 @@ sn_intr_transmit_chars(void)
 	result = ia64_sn_console_xmit_chars((char *)start, xmit_count);
 #ifdef DEBUG
 	if (!result)
-		sn_debug_printf("`");
+		DPRINTF("`");
 #endif
 	if (result > 0) {
 		xmit_count -= result;

@@ -511,7 +511,7 @@ sn_sal_connect_interrupt(void)
 	if (result >= 0)
 		return console_irq;
 
-	printk(KERN_INFO "sn_serial: console proceeding in polled mode\n");
+	printk(KERN_WARNING "sn_serial: console proceeding in polled mode\n");
 	return 0;
 }

@@ -823,7 +823,7 @@ sn_sal_switch_to_asynch(void)
 		return;
 	}
 
-	sn_debug_printf("sn_serial: switch to asynchronous console\n");
+	DPRINTF("sn_serial: switch to asynchronous console\n");
 
 	/* early_printk invocation may have done this for us */
 	if (!sn_func) {

@@ -859,7 +859,7 @@ sn_sal_switch_to_interrupts(void)
 {
 	int irq;
 
-	sn_debug_printf("sn_serial: switching to interrupt driven console\n");
+	DPRINTF("sn_serial: switching to interrupt driven console\n");
 
 	irq = sn_sal_connect_interrupt();
 	if (irq) {

@@ -883,7 +883,7 @@ sn_sal_module_init(void)
 {
 	int retval;
 
-	printk("sn_serial: sn_sal_module_init\n");
+	DPRINTF("sn_serial: sn_sal_module_init\n");
 
 	if (!ia64_platform_is("sn2"))
 		return -ENODEV;

@@ -1016,7 +1016,7 @@ sn_sal_serial_console_init(void)
 	if (ia64_platform_is("sn2")) {
 		sn_sal_switch_to_asynch();
-		sn_debug_printf("sn_sal_serial_console_init : register console\n");
+		DPRINTF("sn_sal_serial_console_init : register console\n");
 		register_console(&sal_console);
 	}
 	return 0;

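Note: the DPRINTF() macro that replaces sn_debug_printf() and the bare printk() above is not defined anywhere in this diff. A typical definition for this kind of compile-time debug printf, assuming a DEBUG switch (names and log level are assumptions, not taken from the commit), would be:

	/* Sketch only -- the actual definition in sn_serial.c is not shown here. */
	#ifdef DEBUG
	#define DPRINTF(x...)	printk(KERN_DEBUG x)
	#else
	#define DPRINTF(x...)	do { } while (0)
	#endif
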
include/asm-ia64/dma-mapping.h

@@ -2,7 +2,7 @@
 #define _ASM_IA64_DMA_MAPPING_H
 
 /*
- * Copyright (C) 2003 Hewlett-Packard Co
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

@@ -18,6 +18,7 @@
 #define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device	platform_dma_sync_single_for_device
 #define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
+#define dma_mapping_error		platform_dma_mapping_error
 
 #define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))

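For context, dma_mapping_error() is what a driver checks after dma_map_single() before handing the address to hardware; the sba/swiotlb/sn stubs added earlier in this diff make that check always succeed on ia64. A minimal, hypothetical caller (function and buffer names are illustrative, not from this commit):

	#include <linux/dma-mapping.h>

	/* Illustration only: map a buffer and verify the mapping before use.
	 * Note the 2004-era signature takes only the dma_addr_t, no device. */
	static int example_map_buffer(struct device *dev, void *buf, size_t len,
				      dma_addr_t *out)
	{
		dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(handle))
			return -ENOMEM;	/* do not program the device with this address */
		*out = handle;
		return 0;
	}
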
include/asm-ia64/machvec.h

@@ -4,7 +4,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
  * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
- * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co.
+ * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
  *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
 #ifndef _ASM_IA64_MACHVEC_H

@@ -21,12 +21,8 @@ struct irq_desc;
 struct page;
 
 typedef void ia64_mv_setup_t (char **);
-typedef void ia64_mv_cpu_init_t(void);
+typedef void ia64_mv_cpu_init_t (void);
 typedef void ia64_mv_irq_init_t (void);
-typedef void ia64_mv_mca_init_t (void);
-typedef void ia64_mv_mca_handler_t (void);
-typedef void ia64_mv_cmci_handler_t (int, void *, struct pt_regs *);
-typedef void ia64_mv_log_print_t (void);
 typedef void ia64_mv_send_ipi_t (int, int, int, int);
 typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
 typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);

@@ -46,6 +42,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_
 typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
 typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
 typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
+typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
 /*

@@ -73,7 +70,10 @@ typedef unsigned int ia64_mv_readl_relaxed_t (void *);
 typedef unsigned long ia64_mv_readq_relaxed_t (void *);
 
 extern void machvec_noop (void);
-extern void machvec_memory_fence (void);
+extern void machvec_setup (char **);
+extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
+extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
+extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 
 # if defined (CONFIG_IA64_HP_SIM)
 #  include <asm/machvec_hpsim.h>

@@ -92,10 +92,6 @@ extern void machvec_memory_fence (void);
 #  define platform_setup		ia64_mv.setup
 #  define platform_cpu_init		ia64_mv.cpu_init
 #  define platform_irq_init		ia64_mv.irq_init
-#  define platform_mca_init		ia64_mv.mca_init
-#  define platform_mca_handler		ia64_mv.mca_handler
-#  define platform_cmci_handler	ia64_mv.cmci_handler
-#  define platform_log_print		ia64_mv.log_print
 #  define platform_send_ipi		ia64_mv.send_ipi
 #  define platform_timer_interrupt	ia64_mv.timer_interrupt
 #  define platform_global_tlb_purge	ia64_mv.global_tlb_purge

@@ -110,6 +106,7 @@ extern void machvec_memory_fence (void);
 #  define platform_dma_sync_sg_for_cpu		ia64_mv.dma_sync_sg_for_cpu
 #  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
 #  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
+#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
 #  define platform_dma_supported	ia64_mv.dma_supported
 #  define platform_irq_desc		ia64_mv.irq_desc
 #  define platform_irq_to_vector	ia64_mv.irq_to_vector

@@ -140,10 +137,6 @@ struct ia64_machine_vector {
 	ia64_mv_setup_t *setup;
 	ia64_mv_cpu_init_t *cpu_init;
 	ia64_mv_irq_init_t *irq_init;
-	ia64_mv_mca_init_t *mca_init;
-	ia64_mv_mca_handler_t *mca_handler;
-	ia64_mv_cmci_handler_t *cmci_handler;
-	ia64_mv_log_print_t *log_print;
 	ia64_mv_send_ipi_t *send_ipi;
 	ia64_mv_timer_interrupt_t *timer_interrupt;
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;

@@ -158,6 +151,7 @@ struct ia64_machine_vector {
 	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
 	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
 	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
+	ia64_mv_dma_mapping_error *dma_mapping_error;
 	ia64_mv_dma_supported *dma_supported;
 	ia64_mv_irq_desc *irq_desc;
 	ia64_mv_irq_to_vector *irq_to_vector;

@@ -184,10 +178,6 @@ struct ia64_machine_vector {
 	platform_setup,			\
 	platform_cpu_init,		\
 	platform_irq_init,		\
-	platform_mca_init,		\
-	platform_mca_handler,		\
-	platform_cmci_handler,		\
-	platform_log_print,		\
 	platform_send_ipi,		\
 	platform_timer_interrupt,	\
 	platform_global_tlb_purge,	\

@@ -202,6 +192,7 @@ struct ia64_machine_vector {
 	platform_dma_sync_sg_for_cpu,	\
 	platform_dma_sync_single_for_device, \
 	platform_dma_sync_sg_for_device, \
+	platform_dma_mapping_error,	\
 	platform_dma_supported,		\
 	platform_irq_desc,		\
 	platform_irq_to_vector,		\

@@ -243,6 +234,7 @@ extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
 extern ia64_mv_dma_supported		swiotlb_dma_supported;
 
 /*

@@ -250,31 +242,20 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
  * to update the machvec files for all existing platforms.
 */
 #ifndef platform_setup
-# define platform_setup			((ia64_mv_setup_t *) machvec_noop)
+# define platform_setup			machvec_setup
 #endif
 #ifndef platform_cpu_init
-# define platform_cpu_init		((ia64_mv_cpu_init_t *) machvec_noop)
+# define platform_cpu_init		machvec_noop
 #endif
 #ifndef platform_irq_init
-# define platform_irq_init		((ia64_mv_irq_init_t *) machvec_noop)
-#endif
-#ifndef platform_mca_init
-# define platform_mca_init		((ia64_mv_mca_init_t *) machvec_noop)
-#endif
-#ifndef platform_mca_handler
-# define platform_mca_handler		((ia64_mv_mca_handler_t *) machvec_noop)
-#endif
-#ifndef platform_cmci_handler
-# define platform_cmci_handler		((ia64_mv_cmci_handler_t *) machvec_noop)
-#endif
-#ifndef platform_log_print
-# define platform_log_print		((ia64_mv_log_print_t *) machvec_noop)
+# define platform_irq_init		machvec_noop
 #endif
 #ifndef platform_send_ipi
-# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
+# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
 #endif
 #ifndef platform_timer_interrupt
-# define platform_timer_interrupt	((ia64_mv_timer_interrupt_t *) machvec_noop)
+# define platform_timer_interrupt	machvec_timer_interrupt
 #endif
 #ifndef platform_global_tlb_purge
 # define platform_global_tlb_purge	ia64_global_tlb_purge	/* default to architected version */

@@ -312,6 +293,9 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
 #ifndef platform_dma_sync_sg_for_device
 # define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
 #endif
+#ifndef platform_dma_mapping_error
+# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+#endif
 #ifndef platform_dma_supported
 # define platform_dma_supported		swiotlb_dma_supported
 #endif

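The defaults above now point at real, correctly typed empty functions (machvec_setup, machvec_timer_interrupt, machvec_dma_sync_single, machvec_dma_sync_sg, added in arch/ia64/kernel/machvec.c in this same diff) instead of casts of machvec_noop. For illustration only, a hypothetical platform header that overrides a couple of hooks and leaves the rest to these defaults might look roughly like this (platform name and functions invented for the example, not part of the commit):

	#ifndef _ASM_IA64_MACHVEC_FOO_H
	#define _ASM_IA64_MACHVEC_FOO_H

	extern ia64_mv_setup_t		foo_setup;
	extern ia64_mv_send_ipi_t	foo_send_ipi;

	#define platform_name		"foo"
	#define platform_setup		foo_setup
	#define platform_send_ipi	foo_send_ipi
	/* platform_timer_interrupt, platform_dma_*, etc. are not defined here,
	   so they fall back to the typed defaults in machvec.h */

	#endif /* _ASM_IA64_MACHVEC_FOO_H */
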
include/asm-ia64/machvec_hpzx1.h

@@ -9,6 +9,7 @@ extern ia64_mv_dma_unmap_single sba_unmap_single;
 extern ia64_mv_dma_map_sg sba_map_sg;
 extern ia64_mv_dma_unmap_sg sba_unmap_sg;
 extern ia64_mv_dma_supported sba_dma_supported;
+extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
 
 /*
  * This stuff has dual use!

@@ -19,17 +20,18 @@ extern ia64_mv_dma_supported sba_dma_supported;
 */
 #define platform_name				"hpzx1"
 #define platform_setup				dig_setup
-#define platform_dma_init			((ia64_mv_dma_init *) machvec_noop)
+#define platform_dma_init			machvec_noop
 #define platform_dma_alloc_coherent		sba_alloc_coherent
 #define platform_dma_free_coherent		sba_free_coherent
 #define platform_dma_map_single			sba_map_single
 #define platform_dma_unmap_single		sba_unmap_single
 #define platform_dma_map_sg			sba_map_sg
 #define platform_dma_unmap_sg			sba_unmap_sg
-#define platform_dma_sync_single_for_cpu	((ia64_mv_dma_sync_single_for_cpu *) machvec_memory_fence)
-#define platform_dma_sync_sg_for_cpu		((ia64_mv_dma_sync_sg_for_cpu *) machvec_memory_fence)
-#define platform_dma_sync_single_for_device	((ia64_mv_dma_sync_single_for_device *) machvec_memory_fence)
-#define platform_dma_sync_sg_for_device		((ia64_mv_dma_sync_sg_for_device *) machvec_memory_fence)
-#define platform_dma_supported			sba_dma_supported
+#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device	machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
+#define platform_dma_supported			sba_dma_supported
+#define platform_dma_mapping_error		sba_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */

include/asm-ia64/machvec_sn2.h

@@ -66,6 +66,7 @@ extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
 extern ia64_mv_dma_supported		sn_dma_supported;
 
 /*

@@ -111,6 +112,7 @@ extern ia64_mv_dma_supported sn_dma_supported;
 #define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
 #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
 #define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
+#define platform_dma_mapping_error	sn_dma_mapping_error
 #define platform_dma_supported		sn_dma_supported
 
 #include <asm/sn/sn2/io.h>

include/asm-ia64/sn/sndrv.h

@@ -41,6 +41,11 @@
 #define SNDRV_SHUB_RESETSTATS		42
 #define SNDRV_SHUB_GETSTATS		43
 #define SNDRV_SHUB_GETNASID		44
+#define SNDRV_SHUB_GETMMR32		45
+#define SNDRV_SHUB_GETMMR64		46
+#define SNDRV_SHUB_GETMMR64_IO		47
+#define SNDRV_SHUB_PUTMMR64		48
+#define SNDRV_SHUB_PUTMMR64_IO		49
 
 /* Devices */
 #define SNDRV_UKNOWN_DEVICE		-1

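These new ioctl numbers are handled by shubstats_ioctl() in arch/ia64/sn/io/sn2/shub.c above. As a rough user-space sketch of the PUTMMR64 convention visible there (the /proc path is hypothetical and the register address is a placeholder; the argument points at two 64-bit words, an 8-byte-aligned register address followed by the value to write):

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/sn/sndrv.h>

	int main(void)
	{
		/* regval[0] = MMR address (must be 8-byte aligned), regval[1] = value */
		uint64_t regval[2] = { 0x120000 /* hypothetical address */, 0 };
		int fd = open("/proc/sgi_sn/shub_mon", O_RDWR);	/* path hypothetical */

		if (fd < 0)
			return 1;
		if (ioctl(fd, SNDRV_SHUB_PUTMMR64, regval) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
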