Commit 136e336b authored by Matthew Wilcox, committed by Linus Torvalds

[PATCH] drivers/parisc

This huge patch moves a bunch of drivers from arch/parisc/kernel to
drivers/parisc and adds some new drivers in drivers/parisc.
parent 5088d993
# HP 712 kernel keymap. This uses 7 modifier combinations.
keymaps 0-2,4-5,8,12
# ie, plain, Shift, AltGr, Control, Control+Shift, Alt and Control+Alt
# Change the above line into
# keymaps 0-2,4-6,8,12
# in case you want the entries
# altgr control keycode 83 = Boot
# altgr control keycode 111 = Boot
# below.
#
# In fact AltGr is used very little, and one more keymap can
# be saved by mapping AltGr to Alt (and adapting a few entries):
# keycode 100 = Alt
#
keycode 1 = F9 F19 Console_21
control keycode 1 = F9
alt keycode 1 = Console_9
control alt keycode 1 = Console_9
keycode 2 =
keycode 3 = F5 F15 Console_17
control keycode 3 = F5
alt keycode 3 = Console_5
control alt keycode 3 = Console_5
keycode 4 = F3 F13 Console_15
control keycode 4 = F3
alt keycode 4 = Console_3
control alt keycode 4 = Console_3
keycode 5 = F1 F11 Console_13
control keycode 5 = F1
alt keycode 5 = Console_1
control alt keycode 5 = Console_1
keycode 6 = F2 F12 Console_14
control keycode 6 = F2
alt keycode 6 = Console_2
control alt keycode 6 = Console_2
keycode 7 = F12 F12 Console_24
control keycode 7 = F12
alt keycode 7 = Console_12
control alt keycode 7 = Console_12
keycode 8 =
keycode 9 = F10 F20 Console_22
control keycode 9 = F10
alt keycode 9 = Console_10
control alt keycode 9 = Console_10
keycode 10 = F8 F18 Console_20
control keycode 10 = F8
alt keycode 10 = Console_8
control alt keycode 10 = Console_8
keycode 11 = F6 F16 Console_18
control keycode 11 = F6
alt keycode 11 = Console_6
control alt keycode 11 = Console_6
keycode 12 = F4 F14 Console_16
control keycode 12 = F4
alt keycode 12 = Console_4
control alt keycode 12 = Console_4
keycode 13 = Tab Tab
alt keycode 13 = Meta_Tab
keycode 14 = grave asciitilde
control keycode 14 = nul
alt keycode 14 = Meta_grave
keycode 15 =
keycode 16 =
keycode 17 = Alt
keycode 18 = Shift
keycode 19 =
keycode 20 = Control
keycode 21 = q
keycode 22 = one exclam exclam
keycode 23 =
keycode 24 =
keycode 25 =
keycode 26 = z
keycode 27 = s
keycode 28 = a
altgr keycode 28 = Hex_A
keycode 29 = w
keycode 30 = two at at
keycode 31 =
keycode 32 =
keycode 33 = c
altgr keycode 33 = Hex_C
keycode 34 = x
keycode 35 = d
altgr keycode 35 = Hex_D
keycode 36 = e
altgr keycode 36 = Hex_E
keycode 37 = four dollar
keycode 38 = three numbersign
keycode 39 =
keycode 40 =
keycode 41 =
keycode 42 = v
keycode 43 = f
altgr keycode 43 = Hex_F
keycode 44 = t
keycode 45 = r
keycode 46 = five percent
keycode 47 =
keycode 48 =
keycode 49 = n
keycode 50 = b
altgr keycode 50 = Hex_B
keycode 51 = h
keycode 52 = g
keycode 53 = y
keycode 54 = six asciicircum
keycode 55 =
keycode 56 =
keycode 57 =
keycode 58 = m
keycode 59 = j
keycode 60 = u
keycode 61 = seven ampersand
keycode 62 = eight asterisk asterisk
keycode 63 =
keycode 64 =
keycode 65 = comma less
alt keycode 65 = Meta_comma
keycode 66 = k
keycode 67 = i
keycode 68 = o
keycode 69 = zero parenright bracketright
keycode 70 = nine parenleft bracketleft
keycode 71 =
keycode 72 =
keycode 73 = period greater
control keycode 73 = Compose
alt keycode 73 = Meta_period
keycode 74 = slash question
control keycode 74 = Delete
alt keycode 74 = Meta_slash
keycode 75 = l
keycode 76 = semicolon colon
alt keycode 76 = Meta_semicolon
keycode 77 = p
keycode 78 = minus underscore
keycode 79 =
keycode 80 =
keycode 81 =
keycode 82 = apostrophe quotedbl
control keycode 82 = Control_g
alt keycode 82 = Meta_apostrophe
keycode 83 =
keycode 84 = bracketleft braceleft
control keycode 84 = Escape
alt keycode 84 = Meta_bracketleft
keycode 85 = equal plus
keycode 86 =
keycode 87 =
keycode 88 = Caps_Lock
keycode 89 =
keycode 90 = Return
alt keycode 90 = Meta_Control_m
keycode 91 = bracketright braceright asciitilde
control keycode 91 = Control_bracketright
alt keycode 91 = Meta_bracketright
keycode 92 =
keycode 93 = backslash bar
control keycode 93 = Control_backslash
alt keycode 93 = Meta_backslash
keycode 94 =
keycode 95 =
keycode 96 =
keycode 97 =
keycode 98 =
keycode 99 =
keycode 100 =
keycode 101 =
keycode 102 = BackSpace
keycode 103 =
keycode 104 =
keycode 105 = KP_1
alt keycode 105 = Ascii_1
altgr keycode 105 = Hex_1
keycode 106 =
keycode 107 = KP_4
alt keycode 107 = Ascii_4
altgr keycode 107 = Hex_4
keycode 108 = KP_7
alt keycode 108 = Ascii_7
altgr keycode 108 = Hex_7
keycode 109 =
keycode 110 =
keycode 111 =
keycode 112 = KP_0
alt keycode 112 = Ascii_0
altgr keycode 112 = Hex_0
keycode 113 = KP_Period
keycode 114 = KP_2
alt keycode 114 = Ascii_2
altgr keycode 114 = Hex_2
keycode 115 = KP_5
alt keycode 115 = Ascii_5
altgr keycode 115 = Hex_5
keycode 116 = KP_6
alt keycode 116 = Ascii_6
altgr keycode 116 = Hex_6
keycode 117 = KP_8
alt keycode 117 = Ascii_8
altgr keycode 117 = Hex_8
keycode 118 = Escape
keycode 119 =
keycode 120 = F11
keycode 121 = KP_Add
keycode 122 = KP_3
alt keycode 122 = Ascii_3
altgr keycode 122 = Hex_3
keycode 123 = KP_Subtract
keycode 124 = KP_Multiply
keycode 125 = KP_9
alt keycode 125 = Ascii_9
altgr keycode 125 = Hex_9
keycode 126 =
# 131!!
keycode 127 = F7 F17 Console_19
control keycode 127 = F7
alt keycode 127 = Console_7
control alt keycode 127 = Console_7
string F1 = "\033[[A"
string F2 = "\033[[B"
string F3 = "\033[[C"
string F4 = "\033[[D"
string F5 = "\033[[E"
string F6 = "\033[17~"
string F7 = "\033[18~"
string F8 = "\033[19~"
string F9 = "\033[20~"
string F10 = "\033[21~"
string F11 = "\033[23~"
string F12 = "\033[24~"
string F13 = "\033[25~"
string F14 = "\033[26~"
string F15 = "\033[28~"
string F16 = "\033[29~"
string F17 = "\033[31~"
string F18 = "\033[32~"
string F19 = "\033[33~"
string F20 = "\033[34~"
string Find = "\033[1~"
string Insert = "\033[2~"
string Remove = "\033[3~"
string Select = "\033[4~"
string Prior = "\033[5~"
string Next = "\033[6~"
string Macro = "\033[M"
string Pause = "\033[P"
compose '`' 'A' to 'À'
compose '`' 'a' to 'à'
compose '\'' 'A' to 'Á'
compose '\'' 'a' to 'á'
compose '^' 'A' to 'Â'
compose '^' 'a' to 'â'
compose '~' 'A' to 'Ã'
compose '~' 'a' to 'ã'
compose '"' 'A' to 'Ä'
compose '"' 'a' to 'ä'
compose 'O' 'A' to 'Å'
compose 'o' 'a' to 'å'
compose '0' 'A' to 'Å'
compose '0' 'a' to 'å'
compose 'A' 'A' to 'Å'
compose 'a' 'a' to 'å'
compose 'A' 'E' to 'Æ'
compose 'a' 'e' to 'æ'
compose ',' 'C' to 'Ç'
compose ',' 'c' to 'ç'
compose '`' 'E' to 'È'
compose '`' 'e' to 'è'
compose '\'' 'E' to 'É'
compose '\'' 'e' to 'é'
compose '^' 'E' to 'Ê'
compose '^' 'e' to 'ê'
compose '"' 'E' to 'Ë'
compose '"' 'e' to 'ë'
compose '`' 'I' to 'Ì'
compose '`' 'i' to 'ì'
compose '\'' 'I' to 'Í'
compose '\'' 'i' to 'í'
compose '^' 'I' to 'Î'
compose '^' 'i' to 'î'
compose '"' 'I' to 'Ï'
compose '"' 'i' to 'ï'
compose '-' 'D' to 'Ð'
compose '-' 'd' to 'ð'
compose '~' 'N' to 'Ñ'
compose '~' 'n' to 'ñ'
compose '`' 'O' to 'Ò'
compose '`' 'o' to 'ò'
compose '\'' 'O' to 'Ó'
compose '\'' 'o' to 'ó'
compose '^' 'O' to 'Ô'
compose '^' 'o' to 'ô'
compose '~' 'O' to 'Õ'
compose '~' 'o' to 'õ'
compose '"' 'O' to 'Ö'
compose '"' 'o' to 'ö'
compose '/' 'O' to 'Ø'
compose '/' 'o' to 'ø'
compose '`' 'U' to 'Ù'
compose '`' 'u' to 'ù'
compose '\'' 'U' to 'Ú'
compose '\'' 'u' to 'ú'
compose '^' 'U' to 'Û'
compose '^' 'u' to 'û'
compose '"' 'U' to 'Ü'
compose '"' 'u' to 'ü'
compose '\'' 'Y' to 'Ý'
compose '\'' 'y' to 'ý'
compose 'T' 'H' to 'Þ'
compose 't' 'h' to 'þ'
compose 's' 's' to 'ß'
compose '"' 'y' to 'ÿ'
compose 's' 'z' to 'ß'
compose 'i' 'j' to 'ÿ'
/* arch/parisc/kernel/pdc.c - safe pdc access routines
*
* Copyright 1999 SuSE GmbH Nuernberg (Philipp Rumpf, prumpf@tux.org)
* portions Copyright 1999 The Puffin Group, (Alex deVries, David Kennedy)
*
* only these routines should be used outside the real-mode kernel (i.e. by
* everything using virtual addresses), for obvious reasons */
/* I think it would be in everyone's best interest to follow these
* guidelines when writing PDC wrappers:
*
* - the name of the pdc wrapper should match one of the macros
* used for the first two arguments
* - don't use caps for random parts of the name
* - use ASSERT_ALIGN to ensure the alignment of the arguments is
* correct
* - use __pa() to convert virtual (kernel) pointers to physical
* ones.
* - the name of the struct used for pdc return values should equal
* one of the macros used for the first two arguments to the
* corresponding PDC call
* - keep the order of arguments
* - don't be smart (setting trailing NUL bytes for strings, return
* something useful even if the call failed) unless you are sure
* it's not going to affect functionality or performance
*
* Example:
* int pdc_cache_info(struct pdc_cache_info *cache_info )
* {
* ASSERT_ALIGN(cache_info, 8);
*
* return mem_pdc_call(PDC_CACHE,PDC_CACHE_INFO,__pa(cache_info),0);
* }
* prumpf 991016
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/real.h>
#include <asm/system.h>
#define ASSERT_ALIGN(ptr, align) \
do { if(((unsigned long)(ptr)) & (align-1)) { \
printk("PDC: %s:%d %s() called with " \
"unaligned argument from %p", __FILE__, __LINE__, \
__FUNCTION__, __builtin_return_address(0)); \
\
return -1; \
} } while(0)
/* verify address can be accessed without an HPMC */
int pdc_add_valid(void *address)
{
ASSERT_ALIGN(address, 4);
return mem_pdc_call(PDC_ADD_VALID, PDC_ADD_VALID_VERIFY, (unsigned long)address);
}
#if 0
int pdc_chassis_warn(struct pdc_chassis_warn *address)
{
ASSERT_ALIGN(address, 4);
return mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_WARN, __pa(address), 0);
}
#endif
int pdc_chassis_disp(unsigned long disp)
{
return mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_DISP, disp);
}
int pdc_chassis_info(void *pdc_result, void *chassis_info, unsigned long len)
{
ASSERT_ALIGN(pdc_result, 4);
ASSERT_ALIGN(chassis_info, 4);
return mem_pdc_call(PDC_CHASSIS,PDC_RETURN_CHASSIS_INFO,
__pa(pdc_result), __pa(chassis_info), len);
}
int pdc_hpa_processor(void *address)
{
/* We're using 0 for the last parameter just to make sure.
It's actually HVERSION dependent. And remember, life is
hard without a backspace. */
ASSERT_ALIGN(address, 4);
return mem_pdc_call(PDC_HPA, PDC_HPA_PROCESSOR, __pa(address),0);
}
#if 0
int pdc_hpa_modules(void *address)
{
return mem_pdc_call(PDC_HPA, PDC_HPA_MODULES, address);
}
#endif
int pdc_iodc_read(void *address, void * hpa, unsigned int index,
void * iodc_data, unsigned int iodc_data_size)
{
ASSERT_ALIGN(address, 4);
ASSERT_ALIGN(iodc_data, 8);
return mem_pdc_call(PDC_IODC, PDC_IODC_READ,
__pa(address), hpa, index, __pa(iodc_data), iodc_data_size);
}
int pdc_system_map_find_mods(void *pdc_mod_info,
void *mod_path, int index)
{
return mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE,
__pa(pdc_mod_info), __pa(mod_path), (long)index);
}
int pdc_model_info(struct pdc_model *model) {
ASSERT_ALIGN(model, 8);
return mem_pdc_call(PDC_MODEL,PDC_MODEL_INFO,__pa(model),0);
}
/* get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L) */
int pdc_model_sysmodel(char * name)
{
struct pdc_model_sysmodel sys_model;
int retval;
ASSERT_ALIGN(&sys_model, 8);
ASSERT_ALIGN(name, 4);
sys_model.mod_len = 0;
retval = mem_pdc_call(PDC_MODEL,PDC_MODEL_SYSMODEL,__pa(&sys_model),
OS_ID_HPUX,__pa(name));
if (retval == PDC_RET_OK)
name[sys_model.mod_len] = '\0'; /* add trailing '\0' */
else
name[0] = 0;
return retval;
}
/* id: 0 = cpu revision, 1 = boot-rom-version */
int pdc_model_versions(struct pdc_model_cpuid *cpu_id, int id) {
return mem_pdc_call(PDC_MODEL,PDC_MODEL_VERSIONS,__pa(cpu_id),id);
}
int pdc_model_cpuid(struct pdc_model_cpuid *cpu_id) {
cpu_id->cpuid = 0; /* preset zero (call may not be implemented!) */
return mem_pdc_call(PDC_MODEL,6,__pa(cpu_id),0); /* 6="return CPU ID" */
}
int pdc_cache_info(struct pdc_cache_info *cache_info) {
ASSERT_ALIGN(cache_info, 8);
return mem_pdc_call(PDC_CACHE,PDC_CACHE_INFO,__pa(cache_info),0);
}
#ifndef __LP64__
int pdc_btlb_info( struct pdc_btlb_info *btlb ) {
int status;
status = mem_pdc_call(PDC_BLOCK_TLB,PDC_BTLB_INFO,__pa(btlb),0);
if (status<0) btlb->max_size = 0;
return status;
}
int pdc_mem_map_hpa(void *r_addr, void *mod_path) {
return mem_pdc_call(PDC_MEM_MAP,PDC_MEM_MAP_HPA,
__pa(r_addr),__pa(mod_path));
}
int pdc_lan_station_id(char *lan_addr, void *net_hpa) {
struct pdc_lan_station_id id;
unsigned char *addr;
if (mem_pdc_call(PDC_LAN_STATION_ID, PDC_LAN_STATION_ID_READ,
__pa(&id), net_hpa) < 0)
addr = 0; /* FIXME: else read MAC from NVRAM */
else
addr = id.addr;
if (addr)
memmove( lan_addr, addr, PDC_LAN_STATION_ID_SIZE);
else
memset( lan_addr, 0, PDC_LAN_STATION_ID_SIZE);
return (addr != 0);
}
#endif
/* Similar to PDC_PAT stuff in pdcpat.c - but added for Forte/Allegro boxes */
int pdc_pci_irt_size(void *r_addr, void *hpa)
{
return mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL_SIZE,
__pa(r_addr), hpa);
}
int pdc_pci_irt(void *r_addr, void *hpa, void *tbl)
{
return mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL,
__pa(r_addr), hpa, __pa(tbl));
}
/* access the TOD clock */
int pdc_tod_read(struct pdc_tod *tod)
{
ASSERT_ALIGN(tod, 8);
return mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(tod), 0);
}
int pdc_tod_set(unsigned long sec, unsigned long usec)
{
return mem_pdc_call(PDC_TOD, PDC_TOD_WRITE, sec, usec);
}
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
*
* most of these calls might reasonably be moved to ../kernel -PB
*
* The basic principle is to construct a stack frame in C then call
* some assembly which adopts that stack, does some rfi magic, may
* switch wide/narrow mode, and calls the routine described by the
* 'fn' parameter WHICH IS NOT A FUNCTION POINTER!!!!!!!!!!!!!!!!
*/
#include <linux/spinlock.h>
#include <asm/system.h>
#include <stdarg.h>
#include <asm/pgtable.h> /* for __pa() */
#include <asm/pdc.h>
static spinlock_t pdc_lock = SPIN_LOCK_UNLOCKED;
/***************** 32-bit real-mode calls ***********/
/* The struct below is used
* to overlay real_stack (real2.S), preparing a 32-bit call frame.
* real32_call_asm() then uses this stack in narrow real mode
*/
struct narrow_stack {
/* use int, not long which is 64 bits */
unsigned int arg13;
unsigned int arg12;
unsigned int arg11;
unsigned int arg10;
unsigned int arg9;
unsigned int arg8;
unsigned int arg7;
unsigned int arg6;
unsigned int arg5;
unsigned int arg4;
unsigned int arg3;
unsigned int arg2;
unsigned int arg1;
unsigned int arg0;
unsigned int frame_marker[8];
unsigned int sp;
/* in reality, there's nearly 8k of stack after this */
};
long
real32_call(unsigned long fn, ...)
{
unsigned long r;
va_list args;
unsigned long flags;
extern struct narrow_stack real_stack;
extern unsigned long real32_call_asm(unsigned int *,
unsigned int *, unsigned int);
va_start(args, fn);
real_stack.arg0 = va_arg(args, unsigned int);
real_stack.arg1 = va_arg(args, unsigned int);
real_stack.arg2 = va_arg(args, unsigned int);
real_stack.arg3 = va_arg(args, unsigned int);
real_stack.arg4 = va_arg(args, unsigned int);
real_stack.arg5 = va_arg(args, unsigned int);
real_stack.arg6 = va_arg(args, unsigned int);
real_stack.arg7 = va_arg(args, unsigned int);
real_stack.arg8 = va_arg(args, unsigned int);
real_stack.arg9 = va_arg(args, unsigned int);
real_stack.arg10 = va_arg(args, unsigned int);
real_stack.arg11 = va_arg(args, unsigned int);
real_stack.arg12 = va_arg(args, unsigned int);
real_stack.arg13 = va_arg(args, unsigned int);
va_end(args);
if (fn == 0) {
/* mem_pdc call */
fn = PAGE0->mem_pdc;
}
spin_lock_irqsave(&pdc_lock, flags);
r = real32_call_asm(&real_stack.sp, &real_stack.arg0, fn);
spin_unlock_irqrestore(&pdc_lock, flags);
return r;
}
#ifdef __LP64__
/***************** 64-bit real-mode calls ***********/
struct wide_stack {
unsigned long arg0;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
unsigned long arg6;
unsigned long arg7;
unsigned long arg8;
unsigned long arg9;
unsigned long arg10;
unsigned long arg11;
unsigned long arg12;
unsigned long arg13;
unsigned long frame_marker[2]; /* rp, previous sp */
unsigned long sp;
/* in reality, there's nearly 8k of stack after this */
};
long
real64_call(unsigned long fn, ...)
{
unsigned long r;
va_list args;
unsigned long flags;
extern struct wide_stack real_stack;
extern unsigned long real64_call_asm(unsigned long *,
unsigned long *, unsigned long);
va_start(args, fn);
real_stack.arg0 = va_arg(args, unsigned long);
real_stack.arg1 = va_arg(args, unsigned long);
real_stack.arg2 = va_arg(args, unsigned long);
real_stack.arg3 = va_arg(args, unsigned long);
real_stack.arg4 = va_arg(args, unsigned long);
real_stack.arg5 = va_arg(args, unsigned long);
real_stack.arg6 = va_arg(args, unsigned long);
real_stack.arg7 = va_arg(args, unsigned long);
real_stack.arg8 = va_arg(args, unsigned long);
real_stack.arg9 = va_arg(args, unsigned long);
real_stack.arg10 = va_arg(args, unsigned long);
real_stack.arg11 = va_arg(args, unsigned long);
real_stack.arg12 = va_arg(args, unsigned long);
real_stack.arg13 = va_arg(args, unsigned long);
va_end(args);
if (fn == 0) {
/* mem_pdc call */
fn = PAGE0->mem_pdc_hi;
fn <<= 32;
fn |= PAGE0->mem_pdc;
}
spin_lock_irqsave(&pdc_lock, flags);
r = real64_call_asm(&real_stack.sp, &real_stack.arg0, fn);
spin_unlock_irqrestore(&pdc_lock, flags);
return r;
}
#endif
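/* For reference, a minimal sketch (an assumption, not part of this patch)
 * of how a mem_pdc_call() wrapper could dispatch to the helpers above,
 * picking the wide or narrow variant at compile time:
 *
 *	#ifdef __LP64__
 *	#define mem_pdc_call(args...) real64_call(0, args)
 *	#else
 *	#define mem_pdc_call(args...) real32_call(0, args)
 *	#endif
 *
 * Passing fn == 0 makes real32_call()/real64_call() look up the PDC entry
 * point from PAGE0 themselves, as shown above.
 */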
@@ -6,6 +6,7 @@
#
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_ACPI) += acpi/
obj-y += serial/
obj-$(CONFIG_PARPORT) += parport/
menu "Bus options (PCI, PCMCIA, EISA, GSC, ISA)"
config GSC
bool "VSC/GSC/HSC bus support"
default y
help
The VSC, GSC and HSC busses were used from the earliest 700-series
workstations up to and including the C360/J2240 workstations. They
were also used in servers from the E-class to the K-class. They
are not found in B1000, C3000, J5000, A500, L1000, N4000 and upwards.
If in doubt, say "Y".
config IOMMU_CCIO
bool "U2/Uturn I/O MMU"
depends on GSC
help
Say Y here to enable DMA management routines for the first
generation of PA-RISC cache-coherent machines. Programs the
U2/Uturn chip in "Virtual Mode" and uses the I/O MMU.
config GSC_LASI
bool "Lasi I/O support"
depends on GSC
help
Say Y here to directly support the Lasi controller chip found on
PA-RISC workstations. Linux-oriented documentation for this chip
can be found at <http://www.parisc-linux.org/documentation/>.
config GSC_WAX
bool "Wax I/O support"
depends on GSC
help
Say Y here to support the Wax GSC to EISA Bridge found in some older
systems, including B/C/D/R class. Some machines use Wax for other
purposes, such as providing one of the serial ports or being an
interface chip for an X.25 GSC card.
config EISA
bool "EISA support"
depends on GSC
help
Say Y here if you have an EISA bus in your machine. This code
supports both the Mongoose & Wax EISA adapters. It is sadly
incomplete and lacks support for card-to-host DMA.
config ISA
bool
depends on EISA
config PCI
bool "PCI support"
help
All recent HP machines have PCI slots, and you should say Y here
if you have a recent machine. If you are convinced you do not have
PCI slots in your machine (eg a 712), then you may say "N" here.
Beware that some GSC cards have a Dino onboard and PCI inside them,
so it may be safest to say "Y" anyway.
config GSC_DINO
bool "GSCtoPCI/Dino PCI support"
depends on PCI && GSC
help
Say Y here to support the Dino & Cujo GSC to PCI bridges found in
machines from the B132 to the C360, the J2240 and the A180. Some
GSC/HSC cards (eg gigabit & dual 100 Mbit Ethernet) have a Dino on
the card, and you also need to say Y here if you have such a card.
If in doubt, say Y.
config PCI_LBA
bool "LBA/Elroy PCI support"
depends on PCI
help
Say Y here to support the Elroy PCI Lower Bus Adapter. This is
present on B, C, J, L and N-class machines with 4-digit model
numbers and the A400/A500.
config IOSAPIC
bool
depends on PCI_LBA
default y
config IOMMU_SBA
bool
depends on PCI_LBA
default y
#config PCI_EPIC
# bool "EPIC/SAGA PCI support"
# depends on PCI
config SUPERIO
bool
depends on PCI
help
Say Y here to support the SuperIO chip found in Bxxxx, C3xxx and
J5xxx+ machines.
source "drivers/pci/Kconfig"
config CHASSIS_LCD_LED
bool "Chassis LCD and LED support"
endmenu
#
# Makefile for most of the non-PCI devices in PA-RISC machines
#
export-objs := gsc.o superio.o
obj-y := gsc.o power.o
obj-m :=
obj-n :=
obj- :=
obj-$(CONFIG_GSC_DINO) += dino.o
obj-$(CONFIG_GSC_LASI) += lasi.o asp.o
obj-$(CONFIG_GSC_WAX) += wax.o
obj-$(CONFIG_EISA) += eisa.o eisa_enumerator.o eisa_eeprom.o
obj-$(CONFIG_SUPERIO) += superio.o
obj-$(CONFIG_PCI_LBA) += lba_pci.o
# I/O SAPIC is also on IA64 platforms.
# The two could be merged into a common source some day.
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_IOMMU_SBA) += sba_iommu.o
# Only use one of them: ccio-rm-dma is for PCX-W systems *only*
# obj-$(CONFIG_IOMMU_CCIO) += ccio-rm-dma.o
obj-$(CONFIG_IOMMU_CCIO) += ccio-dma.o
obj-$(CONFIG_CHASSIS_LCD_LED) += led.o
/*
** HP VISUALIZE Workstation PCI Bus Defect
**
** "HP has discovered a potential system defect that can affect
** the behavior of five models of HP VISUALIZE workstations when
** equipped with third-party or customer-installed PCI I/O expansion
** cards. The defect is limited to the HP C180, C160, C160L, B160L,
** and B132L VISUALIZE workstations, and will only be encountered
** when data is transmitted through PCI I/O expansion cards on the
** PCI bus. HP-supplied graphics cards that utilize the PCI bus are
** not affected."
**
** REVISIT: "go/pci_defect" link below is stale.
** HP Internal can use <http://hpfcdma.fc.hp.com:80/Dino/>
**
** Product First Good Serial Number
** C200/C240 (US) US67350000
** B132L+/B180 (US) US67390000
** C200 (Europe) 3713G01000
** B180L (Europe) 3720G01000
**
** Note that many boards were fixed/replaced under a free replacement
** program. Assume a machine is only "suspect" until proven otherwise.
**
** "The pci_check program will also be available as application
** patch PHSS_12295"
*/
/*
* ASP Device Driver
*
* (c) Copyright 2000 The Puffin Group Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* by Helge Deller <deller@gmx.de>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/led.h>
#include "gsc.h"
#define ASP_GSC_IRQ 3 /* hardcoded interrupt for GSC */
#define ASP_VER_OFFSET 0x20 /* offset of ASP version */
#define ASP_LED_ADDR 0xf0800020
#define VIPER_INT_WORD 0xFFFBF088 /* addr of viper interrupt word */
static int asp_choose_irq(struct parisc_device *dev)
{
int irq = -1;
switch (dev->id.sversion) {
case 0x71: irq = 22; break; /* SCSI */
case 0x72: irq = 23; break; /* LAN */
case 0x73: irq = 30; break; /* HIL */
case 0x74: irq = 24; break; /* Centronics */
case 0x75: irq = (dev->hw_path == 4) ? 26 : 25; break; /* RS232 */
case 0x76: irq = 21; break; /* EISA BA */
case 0x77: irq = 20; break; /* Graphics1 */
case 0x7a: irq = 18; break; /* Audio (Bushmaster) */
case 0x7b: irq = 18; break; /* Audio (Scorpio) */
case 0x7c: irq = 28; break; /* FW SCSI */
case 0x7d: irq = 27; break; /* FDDI */
case 0x7f: irq = 18; break; /* Audio (Outfield) */
}
return irq;
}
/* There are two register ranges we're interested in. Interrupt /
* Status / LED are at 0xf080xxxx and Asp special registers are at
* 0xf082fxxx. PDC only tells us that Asp is at 0xf082f000, so for
* the purposes of interrupt handling, we have to tell other bits of
* the kernel to look at the other registers.
*/
#define ASP_INTERRUPT_ADDR 0xf0800000
int __init
asp_init_chip(struct parisc_device *dev)
{
struct busdevice *asp;
struct gsc_irq gsc_irq;
int irq, ret;
asp = kmalloc(sizeof(struct busdevice), GFP_KERNEL);
if(!asp)
return -ENOMEM;
asp->version = gsc_readb(dev->hpa + ASP_VER_OFFSET) & 0xf;
asp->name = (asp->version == 1) ? "Asp" : "Cutoff";
asp->hpa = ASP_INTERRUPT_ADDR;
printk(KERN_INFO "%s version %d at 0x%lx found.\n",
asp->name, asp->version, dev->hpa);
/* the IRQ ASP should use */
ret = -EBUSY;
irq = gsc_claim_irq(&gsc_irq, ASP_GSC_IRQ);
if (irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n", __FUNCTION__);
goto out;
}
ret = request_irq(gsc_irq.irq, busdev_barked, 0, "asp", asp);
if (ret < 0)
goto out;
/* Save this for debugging later */
asp->parent_irq = gsc_irq.irq;
asp->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
/* Program VIPER to interrupt on the ASP irq */
gsc_writel((1 << (31 - ASP_GSC_IRQ)),VIPER_INT_WORD);
/* Done init'ing, register this driver */
ret = gsc_common_irqsetup(dev, asp);
if (ret)
goto out;
fixup_child_irqs(dev, asp->busdev_region->data.irqbase, asp_choose_irq);
/* Mongoose is a sibling of Asp, not a child... */
fixup_child_irqs(dev->parent, asp->busdev_region->data.irqbase,
asp_choose_irq);
/* initialize the chassis LEDs */
#ifdef CONFIG_CHASSIS_LCD_LED
register_led_driver(DISPLAY_MODEL_OLD_ASP, LED_CMD_REG_NONE,
(char *)ASP_LED_ADDR);
#endif
return 0;
out:
kfree(asp);
return ret;
}
static struct parisc_device_id asp_tbl[] = {
{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00070 },
{ 0, }
};
struct parisc_driver asp_driver = {
name: "Asp",
id_table: asp_tbl,
probe: asp_init_chip,
};
@@ -38,16 +38,18 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#define PCI_DEBUG
#include <linux/pci.h>
#undef PCI_DEBUG
#include <asm/byteorder.h>
#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/gsc.h> /* for gsc_writeN()... */
#include <asm/hardware.h> /* for register_module() */
/*
** Choose "ccio" since that's what HP-UX calls it.
@@ -55,12 +57,10 @@
*/
#define MODULE_NAME "ccio"
/*
#define DEBUG_CCIO_RES
#define DEBUG_CCIO_RUN
#define DEBUG_CCIO_INIT
#define DUMP_RESMAP
*/
#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG
#include <linux/proc_fs.h>
#include <asm/runway.h> /* for proc_runway_root */
@@ -83,68 +83,20 @@
#define DBG_RES(x...)
#endif
#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif
#define CCIO_INLINE /* inline */
#define WRITE_U32(value, addr) gsc_writel(value, (u32 *) (addr))
#define WRITE_U32(value, addr) gsc_writel(value, (u32 *)(addr))
#define READ_U32(addr) gsc_readl((u32 *)(addr))
#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC 0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC 0x502
/* We *can't* support JAVA (T600). Venture there at your own risk. */
static void dump_resmap(void);
static int ccio_driver_callback(struct hp_device *, struct pa_iodc_driver *);
static struct pa_iodc_driver ccio_drivers_for[] = {
{HPHW_IOA, U2_IOA_RUNWAY, 0x0, 0xb, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "U2 I/O MMU", (void *) ccio_driver_callback},
{HPHW_IOA, UTURN_IOA_RUNWAY, 0x0, 0xb, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "Uturn I/O MMU", (void *) ccio_driver_callback},
/*
** FIXME: The following claims the GSC bus port, not the IOA.
** And there are two busses below a single I/O TLB.
**
** These should go away once we have a real PA bus walk.
** Firmware wants to tell the PA bus walk code about the GSC ports
** since they are not "architected" PA I/O devices. Ie a PA bus walk
** wouldn't discover them. But the PA bus walk code could check
** the "fixed module table" to add such devices to an I/O Tree
** and proceed with the recursive, depth first bus walk.
*/
{HPHW_BCPORT, U2_BC_GSC, 0x0, 0xc, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "U2 GSC+ BC", (void *) ccio_driver_callback},
{HPHW_BCPORT, UTURN_BC_GSC, 0x0, 0xc, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "Uturn GSC+ BC", (void *) ccio_driver_callback},
{0,0,0,0,0,0,
0,
(char *) NULL, (char *) NULL, (void *) NULL }
};
#define IS_U2(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == U2_IOA_RUNWAY)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == U2_BC_GSC)) \
)
#define IS_UTURN(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == UTURN_IOA_RUNWAY)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == UTURN_BC_GSC)) \
)
#define IOA_NORMAL_MODE 0x00020080 /* IO_CONTROL to turn on CCIO */
#define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
@@ -176,41 +128,46 @@ struct ioa_registers {
volatile uint32_t io_io_high; /* Offset 15 */
};
struct ccio_device {
struct ccio_device *next; /* list of LBA's in system */
struct hp_device *iodc; /* data about dev from firmware */
spinlock_t ccio_lock;
struct ioa_registers *ccio_hpa; /* base address */
struct ioc {
struct ioa_registers *ioc_hpa; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
char *res_map; /* resource map, bit == pdir entry */
int res_hint; /* next available IOVP - circular search */
int res_size; /* size of resource map in bytes */
int chainid_shift; /* specify bit location of chain_id */
int flags; /* state/functionality enabled */
#ifdef DELAYED_RESOURCE_CNT
dma_addr_t res_delay[DELAYED_RESOURCE_CNT];
u32 res_hint; /* next available IOVP -
circular search */
u32 res_size; /* size of resource map in bytes */
spinlock_t res_lock;
#ifdef CONFIG_PROC_FS
#define CCIO_SEARCH_SAMPLE 0x100
unsigned long avg_search[CCIO_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
unsigned long used_pages;
unsigned long msingle_calls;
unsigned long msingle_pages;
unsigned long msg_calls;
unsigned long msg_pages;
unsigned long usingle_calls;
unsigned long usingle_pages;
unsigned long usg_calls;
unsigned long usg_pages;
unsigned short cujo20_bug;
#endif
/* STUFF We don't need in performance path */
int pdir_size; /* in bytes, determined by IOV Space size */
int hw_rev; /* HW revision of chip */
u32 pdir_size; /* in bytes, determined by IOV Space size */
u32 chainid_shift; /* specify bit location of chain_id */
struct ioc *next; /* Linked list of discovered iocs */
const char *name; /* device name from firmware */
unsigned int hw_path; /* the hardware path this ioc is associated with */
struct pci_dev *fake_pci_dev; /* the fake pci_dev for non-pci devs */
struct resource mmio_region[2]; /* The "routed" MMIO regions */
};
/* Ratio of Host MEM to IOV Space size */
static unsigned long ccio_mem_ratio = 4;
static struct ccio_device *ccio_list = NULL;
static int ccio_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long ccio_used_bytes = 0;
static unsigned long ccio_used_pages = 0;
static int ccio_cujo_bug = 0;
static unsigned long ccio_alloc_size = 0;
static unsigned long ccio_free_size = 0;
static struct ioc *ioc_list;
static int ioc_count;
/**************************************************************
*
@@ -227,55 +184,41 @@ static unsigned long ccio_free_size = 0;
* match the I/O TLB replacement policy.
*
***************************************************************/
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK
/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & ~(IOVP_SIZE-1) )
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
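/*
** Worked example of the macros above (assuming 4kB pages): for
** iova 0x0123456c, CCIO_IOVP() yields iovp 0x01234000 and the offset
** is 0x56c; PDIR_INDEX(0x01234000) == 0x1234, and
** CCIO_IOVA(MKIOVP(0x1234), 0x56c) reconstructs the original iova.
*/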
/* CUJO20 KLUDGE start */
#define CUJO_20_BITMASK 0x0ffff000 /* upper nibble is a don't care */
#define CUJO_20_STEP 0x10000000 /* inc upper nibble */
#define CUJO_20_BADPAGE1 0x01003000 /* pages that hpmc on raven U+ */
#define CUJO_20_BADPAGE2 0x01607000 /* pages that hpmc on firehawk U+ */
#define CUJO_20_BADHVERS 0x6821 /* low nibble 1 is cujo rev 2.0 */
#define CUJO_RAVEN_LOC 0xf1000000UL /* cujo location on raven U+ */
#define CUJO_FIREHAWK_LOC 0xf1604000UL /* cujo location on firehawk U+ */
/* CUJO20 KLUDGE end */
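/*
** A hypothetical sketch (not in this patch) of how the constants above
** could be used: permanently reserve the pages that HPMC in the ioc's
** resource map so ccio_alloc_range() never hands them out. Every
** CUJO_20_STEP of IO virtual space aliases another bad page.
**
**	static void cujo20_fixup_sketch(struct ioc *ioc, u32 iovp)
**	{
**		unsigned int idx = PDIR_INDEX(iovp) >> 3;
**
**		while (idx < ioc->res_size) {
**			ioc->res_map[idx] |= 0xff; // burn 8 pages at a time
**			idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
**		}
**	}
*/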
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
/*
** Don't worry about the 150% average search length on a miss.
** If the search wraps around, and passes the res_hint, it will
** cause the kernel to panic anyhow.
*/
/* ioa->res_hint = idx + (size >> 3); \ */
#define CCIO_SEARCH_LOOP(ioa, idx, mask, size) \
for(; res_ptr < res_end; ++res_ptr) \
{ \
if(0 == ((*res_ptr) & mask)) { \
*res_ptr |= mask; \
idx = (int)((unsigned long)res_ptr - (unsigned long)ioa->res_map); \
ioa->res_hint = 0;\
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask_ptr, size) \
for(; res_ptr < res_end; ++res_ptr) { \
if(0 == (*res_ptr & *mask_ptr)) { \
*res_ptr |= *mask_ptr; \
res_idx = (int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
ioc->res_hint = res_idx + (size >> 3); \
goto resource_found; \
} \
}
#define CCIO_FIND_FREE_MAPPING(ioa, idx, mask, size) { \
u##size *res_ptr = (u##size *)&((ioa)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
u##size *res_end = (u##size *)&(ioa)->res_map[ioa->res_size]; \
CCIO_SEARCH_LOOP(ioa, idx, mask, size); \
res_ptr = (u##size *)&(ioa)->res_map[0]; \
CCIO_SEARCH_LOOP(ioa, idx, mask, size); \
}
#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \
u##size *mask_ptr = (u##size *)&mask; \
CCIO_SEARCH_LOOP(ioc, res_idx, mask_ptr, size); \
res_ptr = (u##size *)&(ioc)->res_map[0]; \
CCIO_SEARCH_LOOP(ioa, res_idx, mask_ptr, size);
/*
** Find available bit in this ioa's resource map.
@@ -290,123 +233,132 @@ static unsigned long ccio_free_size = 0;
** o use different search for "large" (eg > 4 pages) or "very large"
** (eg > 16 pages) mappings.
*/
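/*
** Plain-C sketch of what one CCIO_FIND_FREE_MAPPING/CCIO_SEARCH_LOOP
** instantiation boils down to (illustrative, u8 case): a first-fit scan
** for a byte where `mask` fits, starting near res_hint and wrapping once
** to the start of the map before giving up:
**
**	u8 *res_ptr = (u8 *)&ioc->res_map[ioc->res_hint];
**	u8 *res_end = (u8 *)&ioc->res_map[ioc->res_size];
**
**	for (; res_ptr < res_end; ++res_ptr) {
**		if ((*res_ptr & mask) == 0) {
**			*res_ptr |= mask;	// claim the pages
**			return (res_ptr - (u8 *)ioc->res_map) << 3;
**		}
**	}
**	// not found: retry once from res_map[0], then panic
*/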
/**
* ccio_alloc_range - Allocate pages in the ioc's resource map.
* @ioc: The I/O Controller.
* @pages_needed: The requested number of pages to be mapped into the
* I/O Pdir...
*
* This function searches the resource map of the ioc to locate a range
* of available pages for the requested size.
*/
static int
ccio_alloc_range(struct ccio_device *ioa, size_t size)
ccio_alloc_range(struct ioc *ioc, unsigned long pages_needed)
{
int res_idx;
unsigned long mask, flags;
unsigned int pages_needed = size >> PAGE_SHIFT;
unsigned long mask;
#ifdef CONFIG_PROC_FS
unsigned long cr_start = mfctl(16);
#endif
ASSERT(pages_needed);
ASSERT((pages_needed * IOVP_SIZE) < DMA_CHUNK_SIZE);
ASSERT(pages_needed < (BITS_PER_LONG - IOVP_SHIFT));
ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
ASSERT(pages_needed <= BITS_PER_LONG);
mask = (unsigned long) -1L;
mask >>= BITS_PER_LONG - pages_needed;
mask = ~(~0UL >> pages_needed);
DBG_RES(__FUNCTION__ " size: %d pages_needed %d pages_mask 0x%08lx\n",
size, pages_needed, mask);
spin_lock_irqsave(&ioa->ccio_lock, flags);
DBG_RES("%s() size: %d pages_needed %d mask 0x%08lx\n",
__FUNCTION__, size, pages_needed, mask);
/*
** "seek and ye shall find"...praying never hurts either...
** ggg sacrafices another 710 to the computer gods.
** ggg sacrifices another 710 to the computer gods.
*/
if(pages_needed <= 8) {
CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, 8);
CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
} else if(pages_needed <= 16) {
CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, 16);
CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 16);
} else if(pages_needed <= 32) {
CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, 32);
CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 32);
#ifdef __LP64__
} else if(pages_needed <= 64) {
CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, 64)
CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 64);
#endif
} else {
panic(__FILE__ ":" __FUNCTION__ "() Too many pages to map.\n");
panic(__FILE__ ": %s() Too many pages to map. pages_needed: %ld\n",
__FUNCTION__, pages_needed);
}
#ifdef DUMP_RESMAP
dump_resmap();
#endif
panic(__FILE__ ":" __FUNCTION__ "() I/O MMU is out of mapping resources\n");
panic(__FILE__ ": %s() I/O MMU is out of mapping resources.\n",
__FUNCTION__);
resource_found:
DBG_RES(__FUNCTION__ " res_idx %d mask 0x%08lx res_hint: %d\n",
res_idx, mask, ioa->res_hint);
ccio_used_pages += pages_needed;
ccio_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
DBG_RES("%s() res_idx %d mask 0x%08lx res_hint: %d\n",
__FUNCTION__, res_idx, mask, ioc->res_hint);
spin_unlock_irqrestore(&ioa->ccio_lock, flags);
#ifdef CONFIG_PROC_FS
{
unsigned long cr_end = mfctl(16);
unsigned long tmp = cr_end - cr_start;
/* check for roll over */
cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
}
ioc->avg_search[ioc->avg_idx++] = cr_start;
ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
#ifdef DUMP_RESMAP
dump_resmap();
ioc->used_pages += pages_needed;
#endif
/*
** return the bit address (convert from byte to bit).
** return the bit address.
*/
return (res_idx << 3);
return res_idx << 3;
}
#define CCIO_FREE_MAPPINGS(ioa, idx, mask, size) \
u##size *res_ptr = (u##size *)&((ioa)->res_map[idx + (((size >> 3) - 1) & ~((size >> 3) - 1))]); \
ASSERT((*res_ptr & mask) == mask); \
*res_ptr &= ~mask;
/*
** clear bits in the ioa's resource map
*/
#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
u##size *mask_ptr = (u##size *)&mask; \
ASSERT((*res_ptr & *mask_ptr) == *mask_ptr); \
*res_ptr &= ~(*mask_ptr);
/**
* ccio_free_range - Free pages from the ioc's resource map.
* @ioc: The I/O Controller.
* @iova: The I/O Virtual Address.
* @pages_mapped: The requested number of pages to be freed from the
* I/O Pdir.
*
* This function frees the resources allocated for the iova.
*/
static void
ccio_free_range(struct ccio_device *ioa, dma_addr_t iova, size_t size)
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
unsigned long mask, flags;
unsigned long mask;
unsigned long iovp = CCIO_IOVP(iova);
unsigned int res_idx = PDIR_INDEX(iovp)>>3;
unsigned int pages_mapped = (size >> IOVP_SHIFT) + !!(size & ~IOVP_MASK);
unsigned int res_idx = PDIR_INDEX(iovp) >> 3;
ASSERT(pages_needed);
ASSERT((pages_needed * IOVP_SIZE) < DMA_CHUNK_SIZE);
ASSERT(pages_needed < (BITS_PER_LONG - IOVP_SHIFT));
ASSERT(pages_mapped);
ASSERT((pages_mapped * IOVP_SIZE) <= DMA_CHUNK_SIZE);
ASSERT(pages_mapped <= BITS_PER_LONG);
mask = (unsigned long) -1L;
mask >>= BITS_PER_LONG - pages_mapped;
mask = ~(~0UL >> pages_mapped);
DBG_RES(__FUNCTION__ " res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
res_idx, size, pages_mapped, mask);
DBG_RES("%s(): res_idx: %d pages_mapped %d mask 0x%08lx\n",
__FUNCTION__, res_idx, pages_mapped, mask);
spin_lock_irqsave(&ioa->ccio_lock, flags);
#ifdef CONFIG_PROC_FS
ioc->used_pages -= pages_mapped;
#endif
if(pages_mapped <= 8) {
CCIO_FREE_MAPPINGS(ioa, res_idx, mask, 8);
CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
} else if(pages_mapped <= 16) {
CCIO_FREE_MAPPINGS(ioa, res_idx, mask, 16);
CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 16);
} else if(pages_mapped <= 32) {
CCIO_FREE_MAPPINGS(ioa, res_idx, mask, 32);
CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 32);
#ifdef __LP64__
} else if(pages_mapped <= 64) {
CCIO_FREE_MAPPINGS(ioa, res_idx, mask, 64);
CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 64);
#endif
} else {
panic(__FILE__ ":" __FUNCTION__ "() Too many pages to unmap.\n");
panic(__FILE__ ":%s() Too many pages to unmap.\n",
__FUNCTION__);
}
ccio_used_pages -= (pages_mapped ? pages_mapped : 1);
ccio_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
spin_unlock_irqrestore(&ioa->ccio_lock, flags);
#ifdef DUMP_RESMAP
dump_resmap();
#endif
}
/****************************************************************
**
** CCIO dma_ops support routines
@@ -416,7 +368,6 @@ ccio_free_range(struct ccio_device *ioa, dma_addr_t iova, size_t size)
typedef unsigned long space_t;
#define KERNEL_SPACE 0
/*
** DMA "Page Type" and Hints
** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
@@ -466,32 +417,35 @@ static u32 hint_lookup[] = {
[PCI_DMA_NONE] 0, /* not valid */
};
/*
** Initialize an I/O Pdir entry
**
** Given a virtual address (vba, arg2) and space id, (sid, arg1),
** load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
** entry consists of 8 bytes as shown below (MSB == bit 0):
**
**
** WORD 0:
** +------+----------------+-----------------------------------------------+
** | Phys | Virtual Index | Phys |
** | 0:3 | 0:11 | 4:19 |
** |4 bits| 12 bits | 16 bits |
** +------+----------------+-----------------------------------------------+
** WORD 1:
** +-----------------------+-----------------------------------------------+
** | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid |
** | 20:39 | | Enable |Enable | |Enable|DMA | |
** | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit |
** +-----------------------+-----------------------------------------------+
**
** The virtual index field is filled with the results of the LCI
** (Load Coherence Index) instruction. The 8 bits used for the virtual
** index are bits 12:19 of the value returned by LCI.
*/
/**
* ccio_io_pdir_entry - Initialize an I/O Pdir.
* @pdir_ptr: A pointer into I/O Pdir.
* @sid: The Space Identifier.
* @vba: The virtual address.
* @hints: The DMA Hint.
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1),
* load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
* entry consists of 8 bytes as shown below (MSB == bit 0):
*
*
* WORD 0:
* +------+----------------+-----------------------------------------------+
* | Phys | Virtual Index | Phys |
* | 0:3 | 0:11 | 4:19 |
* |4 bits| 12 bits | 16 bits |
* +------+----------------+-----------------------------------------------+
* WORD 1:
* +-----------------------+-----------------------------------------------+
* | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid |
* | 20:39 | | Enable |Enable | |Enable|DMA | |
* | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit |
* +-----------------------+-----------------------------------------------+
*
* The virtual index field is filled with the results of the LCI
* (Load Coherence Index) instruction. The 8 bits used for the virtual
* index are bits 12:19 of the value returned by LCI.
*/
void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void * vba, unsigned long hints)
{
@@ -499,8 +453,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void * vba, unsigned long hints)
register unsigned long ci; /* coherent index */
/* We currently only support kernel addresses */
ASSERT(sid == 0);
ASSERT(((unsigned long) vba & 0xf0000000UL) == 0xc0000000UL);
ASSERT(sid == KERNEL_SPACE);
mtsp(sid,1);
@@ -524,7 +477,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void * vba, unsigned long hints)
** and deposit them
*/
asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
asm volatile ("extrd,u %1,31,16,%0" : "+r" (ci) : "r" (ci));
asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
pa = 0;
@@ -556,31 +509,39 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void * vba, unsigned long hints)
asm volatile("sync");
}
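/* A standalone sketch (an assumption, not this driver's code) of the
 * entry layout documented above, written with conventional LSB-0 shifts.
 * HP numbering puts bit 0 at the MSB, so HP bits b..b+n-1 of the 64-bit
 * entry become a shift of (64 - n - b):
 *
 *	u64 entry = 0;
 *	entry |= ((pa >> 36) & 0xfULL)    << 60;  // Phys 0:3,   HP bits 0-3
 *	entry |= ((u64)(ci & 0xfff))      << 48;  // Virt index, HP bits 4-15
 *	entry |= ((pa >> 20) & 0xffffULL) << 32;  // Phys 4:19,  HP bits 16-31
 *	entry |= (pa & 0xfffffULL)        << 12;  // Phys 20:39, HP bits 32-51
 *	entry |= hints & 0xffULL;                 // hint/valid bits, byte 7
 *
 * Keeping VALID in the low-order byte is why ccio_mark_invalid() below can
 * clear just pdir_ptr[7] to invalidate an entry.
 */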
/*
** Remove stale entries from the I/O TLB.
** Need to do this whenever an entry in the PDIR is marked invalid.
*/
/**
* ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
* @ioc: The I/O Controller.
* @iovp: The I/O Virtual Page.
* @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
*
* Purge invalid I/O PDIR entries from the I/O TLB.
*
* FIXME: Can we change the byte_cnt to pages_mapped?
*/
static CCIO_INLINE void
ccio_clear_io_tlb( struct ccio_device *d, dma_addr_t iovp, size_t byte_cnt)
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
u32 chain_size = 1 << d->chainid_shift;
u32 chain_size = 1 << ioc->chainid_shift;
iovp &= ~(IOVP_SIZE-1); /* clear offset bits, just want pagenum */
iovp &= IOVP_MASK; /* clear offset bits, just want pagenum */
byte_cnt += chain_size;
while (byte_cnt > chain_size) {
WRITE_U32(CMD_TLB_PURGE | iovp, &d->ccio_hpa->io_command);
while(byte_cnt > chain_size) {
WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_hpa->io_command);
iovp += chain_size;
byte_cnt -= chain_size;
}
}
/***********************************************************
/**
* ccio_mark_invalid - Mark the I/O Pdir entries invalid.
* @ioc: The I/O Controller.
* @iova: The I/O Virtual Address.
* @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
*
* Mark the I/O Pdir entries invalid and blow away the
* corresponding I/O TLB entries.
* Mark the I/O Pdir entries invalid and blow away the corresponding I/O
* TLB entries.
*
* FIXME: at some threshold it might be "cheaper" to just blow
* away the entire I/O TLB instead of individual entries.
@@ -588,25 +549,25 @@ ccio_clear_io_tlb( struct ccio_device *d, dma_addr_t iovp, size_t byte_cnt)
* FIXME: Uturn has 256 TLB entries. We don't need to purge every
* PDIR entry - just once for each possible TLB entry.
* (We do need to make I/O PDIR entries invalid regardless).
***********************************************************/
*
* FIXME: Can we change byte_cnt to pages_mapped?
*/
static CCIO_INLINE void
ccio_mark_invalid(struct ccio_device *d, dma_addr_t iova, size_t byte_cnt)
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) CCIO_IOVP(iova);
u32 iovp = (u32)CCIO_IOVP(iova);
size_t saved_byte_cnt;
/* round up to nearest page size */
saved_byte_cnt = byte_cnt = (byte_cnt + IOVP_SIZE - 1) & IOVP_MASK;
saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);
while (byte_cnt > 0) {
while(byte_cnt > 0) {
/* invalidate one page at a time */
unsigned int idx = PDIR_INDEX(iovp);
char *pdir_ptr = (char *) &(d->pdir_base[idx]);
ASSERT( idx < (d->pdir_size/sizeof(u64)));
char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
ASSERT(idx < (ioc->pdir_size / sizeof(u64)));
pdir_ptr[7] = 0; /* clear only VALID bit */
/*
** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
** PCX-U/U+ do. (eg C200/C240)
@@ -622,232 +583,485 @@ ccio_mark_invalid(struct ccio_device *d, dma_addr_t iova, size_t byte_cnt)
}
asm volatile("sync");
ccio_clear_io_tlb(d, CCIO_IOVP(iova), saved_byte_cnt);
ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
/****************************************************************
**
** CCIO dma_ops
**
*****************************************************************/
void __init ccio_init(void)
{
register_driver(ccio_drivers_for);
}
static int ccio_dma_supported( struct pci_dev *dev, u64 mask)
/**
* ccio_dma_supported - Verify the IOMMU supports the DMA address range.
* @dev: The PCI device.
* @mask: A bit mask describing the DMA address range of the device.
*
* This function implements the pci_dma_supported function.
*/
static int
ccio_dma_supported(struct pci_dev *dev, u64 mask)
{
if (dev == NULL) {
printk(MODULE_NAME ": EISA/ISA/et al not supported\n");
if(dev == NULL) {
printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
BUG();
return(0);
return 0;
}
dev->dma_mask = mask; /* save it */
/* only support 32-bit devices (ie PCI/GSC) */
return((int) (mask >= 0xffffffffUL));
}
/*
** Dump a hex representation of the resource map.
*/
#ifdef DUMP_RESMAP
static
void dump_resmap()
{
struct ccio_device *ioa = ccio_list;
unsigned long *res_ptr = (unsigned long *)ioa->res_map;
unsigned long i = 0;
printk("res_map: ");
for(; i < (ioa->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
printk("%08lx ", *res_ptr);
printk("\n");
return (int)(mask == 0xffffffffUL);
}
#endif
/*
** map_single returns a fully formed IOVA
*/
static dma_addr_t ccio_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
/**
* ccio_map_single - Map an address range into the IOMMU.
* @dev: The PCI device.
* @addr: The start address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_map_single function.
*/
static dma_addr_t
ccio_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
struct ccio_device *ioa = ccio_list; /* FIXME : see Multi-IOC below */
int idx;
struct ioc *ioc;
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
u64 *pdir_start;
unsigned long hint = hint_lookup[direction];
int idx;
ASSERT(dev);
ASSERT(dev->sysdata);
ASSERT(HBA_DATA(dev->sysdata)->iommu);
ioc = GET_IOC(dev);
ASSERT(size > 0);
/* save offset bits */
offset = ((dma_addr_t) addr) & ~IOVP_MASK;
offset = ((unsigned long) addr) & ~IOVP_MASK;
/* round up to nearest IOVP_SIZE */
size = (size + offset + IOVP_SIZE - 1) & IOVP_MASK;
size = ROUNDUP(size + offset, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
ioc->msingle_calls++;
ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
idx = ccio_alloc_range(ioa, size);
iovp = (dma_addr_t) MKIOVP(idx);
idx = ccio_alloc_range(ioc, (size >> IOVP_SHIFT));
iovp = (dma_addr_t)MKIOVP(idx);
DBG_RUN(__FUNCTION__ " 0x%p -> 0x%lx", addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[idx]);
pdir_start = &(ioa->pdir_base[idx]);
DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
__FUNCTION__, addr, (long)iovp | offset, size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
if ((size % L1_CACHE_BYTES) || ((unsigned long) addr % L1_CACHE_BYTES))
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
hint |= HINT_SAFE_DMA;
/* round up to nearest IOVP_SIZE */
size = (size + IOVP_SIZE - 1) & IOVP_MASK;
while (size > 0) {
while(size > 0) {
ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
DBG_RUN(" pdir %p %08x%08x\n",
pdir_start,
(u32) (((u32 *) pdir_start)[0]),
(u32) (((u32 *) pdir_start)[1])
);
(u32) (((u32 *) pdir_start)[1]));
++pdir_start;
addr += IOVP_SIZE;
size -= IOVP_SIZE;
pdir_start++;
}
spin_unlock_irqrestore(&ioc->res_lock, flags);
/* form complete address */
return CCIO_IOVA(iovp, offset);
}
static void ccio_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
/**
* ccio_unmap_single - Unmap an address range from the IOMMU.
* @dev: The PCI device.
* @addr: The start address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_unmap_single function.
*/
static void
ccio_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
int direction)
{
#ifdef FIXME
/* Multi-IOC (ie N-class) : need to lookup IOC from dev
** o If we can't know about lba PCI data structs, that eliminates ->sysdata.
** o walking up pcidev->parent dead ends at elroy too
** o leaves hashing dev->bus->number into some lookup.
** (may only work for N-class)
*/
struct ccio_device *ioa = dev->sysdata
#else
struct ccio_device *ioa = ccio_list;
#endif
dma_addr_t offset;
struct ioc *ioc;
unsigned long flags;
dma_addr_t offset = iova & ~IOVP_MASK;
offset = iova & ~IOVP_MASK;
ASSERT(dev);
ASSERT(dev->sysdata);
ASSERT(HBA_DATA(dev->sysdata)->iommu);
ioc = GET_IOC(dev);
/* round up to nearest IOVP_SIZE */
size = (size + offset + IOVP_SIZE - 1) & IOVP_MASK;
DBG_RUN("%s() iovp 0x%lx/%x\n",
__FUNCTION__, (long)iova, size);
/* Mask off offset */
iova &= IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
size = ROUNDUP(size, IOVP_SIZE);
DBG_RUN(__FUNCTION__ " iovp 0x%lx\n", (long) iova);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef DELAYED_RESOURCE_CNT
if (ioa->saved_cnt < DELAYED_RESOURCE_CNT) {
ioa->saved_iova[ioa->saved_cnt] = iova;
ioa->saved_size[ioa->saved_cnt] = size;
ccio_saved_cnt++;
} else {
do {
#endif
ccio_mark_invalid(ioa, iova, size);
ccio_free_range(ioa, iova, size);
#ifdef DELAYED_RESOURCE_CNT
d->saved_cnt--;
iova = ioa->saved_iova[ioa->saved_cnt];
size = ioa->saved_size[ioa->saved_cnt];
} while (ioa->saved_cnt)
}
#ifdef CONFIG_PROC_FS
ioc->usingle_calls++;
ioc->usingle_pages += size >> IOVP_SHIFT;
#endif
}
ccio_mark_invalid(ioc, iova, size);
ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
spin_unlock_irqrestore(&ioc->res_lock, flags);
}
static void * ccio_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
/**
* ccio_alloc_consistent - Allocate a consistent DMA mapping.
* @dev: The PCI device.
* @size: The length of the DMA region.
* @dma_handle: The DMA address handed back to the device (not the cpu).
*
* This function implements the pci_alloc_consistent function.
*/
static void *
ccio_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma_handle)
{
void *ret;
unsigned long flags;
struct ccio_device *ioa = ccio_list;
DBG_RUN(__FUNCTION__ " size 0x%x\n", size);
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
if (!hwdev) {
if(!hwdev) {
/* only support PCI */
*dma_handle = 0;
return 0;
}
#endif
spin_lock_irqsave(&ioa->ccio_lock, flags);
ccio_alloc_size += get_order(size);
spin_unlock_irqrestore(&ioa->ccio_lock, flags);
ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));
if (ret) {
memset(ret, 0, size);
*dma_handle = ccio_map_single(hwdev, ret, size, PCI_DMA_BIDIRECTIONAL);
*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
}
DBG_RUN(__FUNCTION__ " ret %p\n", ret);
return ret;
}
/**
* ccio_free_consistent - Free a consistent DMA mapping.
* @dev: The PCI device.
* @size: The length of the DMA region.
* @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
* @dma_handle: The device address returned from the ccio_alloc_consistent.
*
* This function implements the pci_free_consistent function.
*/
static void
ccio_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
ccio_unmap_single(dev, dma_handle, size, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
static void ccio_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL
/**
* ccio_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
* @ioc: The I/O Controller.
* @startsg: The scatter/gather list of coalesced chunks.
* @nents: The number of entries in the scatter/gather list.
* @hint: The DMA Hint.
*
* This function inserts the coalesced scatter/gather list chunks into the
* I/O Controller's I/O Pdir.
*/
static CCIO_INLINE int
ccio_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint)
{
unsigned long flags;
struct ccio_device *ioa = ccio_list;
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
int n_mappings = 0;
u64 *pdirp = 0;
unsigned long dma_offset = 0;
dma_sg--;
while (nents-- > 0) {
int cnt = sg_dma_len(startsg);
sg_dma_len(startsg) = 0;
DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
(unsigned long)sg_dma_address(startsg), cnt,
sg_virt_addr(startsg), startsg->length
);
spin_lock_irqsave(&ioa->ccio_lock, flags);
ccio_free_size += get_order(size);
spin_unlock_irqrestore(&ioa->ccio_lock, flags);
/*
** Look for the start of a new DMA stream
*/
if(sg_dma_address(startsg) & PIDE_FLAG) {
u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
dma_offset = (unsigned long) pide & ~IOVP_MASK;
sg_dma_address(startsg) = 0;
dma_sg++;
sg_dma_address(dma_sg) = pide;
pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
n_mappings++;
}
ccio_unmap_single(hwdev, dma_handle, size, 0);
free_pages((unsigned long) vaddr, get_order(size));
/*
** Look for a VCONTIG chunk
*/
if (cnt) {
unsigned long vaddr = sg_virt_addr(startsg);
ASSERT(pdirp);
/* Since multiple Vcontig blocks could make up
** one DMA stream, *add* cnt to dma_len.
*/
sg_dma_len(dma_sg) += cnt;
cnt += dma_offset;
dma_offset=0; /* only want offset on first chunk */
cnt = ROUNDUP(cnt, IOVP_SIZE);
#ifdef CONFIG_PROC_FS
ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
do {
ccio_io_pdir_entry(pdirp, KERNEL_SPACE,
(void *)vaddr, hint);
vaddr += IOVP_SIZE;
cnt -= IOVP_SIZE;
pdirp++;
} while (cnt > 0);
}
startsg++;
}
return(n_mappings);
}
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
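/*
** Sketch of the intended two-pass call sequence (cf. the `coalesced,
** filled` locals in ccio_map_sg() below; illustrative only):
**
**	coalesced = ccio_coalesce_chunks(ioc, sglist, nents);
**	filled = ccio_fill_pdir(ioc, sglist, nents, hint);
**	// filled is the number of DMA streams the caller should walk
*/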
static int ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
static CCIO_INLINE int
ccio_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
unsigned long vcontig_len; /* len of VCONTIG chunk */
unsigned long vcontig_end;
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
DBG_RUN(KERN_WARNING __FUNCTION__ " START\n");
while (nents > 0) {
/*
** Prepare for first/next DMA stream
*/
dma_sg = vcontig_sg = startsg;
dma_len = vcontig_len = vcontig_end = startsg->length;
vcontig_end += sg_virt_addr(startsg);
dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
sg_dma_len(startsg) = 0;
/*
** This loop terminates one iteration "early" since
** it's always looking one "ahead".
*/
while(--nents > 0) {
unsigned long startsg_end;
startsg++;
startsg_end = sg_virt_addr(startsg) +
startsg->length;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
sg_dma_len(startsg) = 0;
/*
** First make sure current dma stream won't
** exceed DMA_CHUNK_SIZE if we coalesce the
** next entry.
*/
if(ROUNDUP(dma_len + dma_offset + startsg->length,
IOVP_SIZE) > DMA_CHUNK_SIZE)
break;
/*
** Append the next transaction?
*/
if (vcontig_end == sg_virt_addr(startsg)) {
vcontig_len += startsg->length;
vcontig_end += startsg->length;
dma_len += startsg->length;
continue;
}
DBG_RUN(KERN_WARNING __FUNCTION__ " DONE\n");
return tmp;
}
/*
** Not virtually contiguous.
** Terminate prev chunk.
** Start a new chunk.
**
** Once we start a new VCONTIG chunk, dma_offset
** can't change. And we need the offset from the first
** chunk - not the last one. Ergo, successive chunks
** must start on page boundaries and dovetail with
** their predecessors.
*/
sg_dma_len(vcontig_sg) = vcontig_len;
vcontig_sg = startsg;
vcontig_len = startsg->length;
break;
}
/*
** End of DMA Stream
** Terminate last VCONTIG block.
** Allocate space for DMA stream.
*/
sg_dma_len(vcontig_sg) = vcontig_len;
dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
sg_dma_address(dma_sg) =
PIDE_FLAG
| (ccio_alloc_range(ioc, (dma_len >> IOVP_SHIFT)) << IOVP_SHIFT)
| dma_offset;
n_mappings++;
}
return n_mappings;
}
/**
* ccio_map_sg - Map the scatter/gather list into the IOMMU.
* @dev: The PCI device.
* @sglist: The scatter/gather list to be mapped in the IOMMU.
* @nents: The number of entries in the scatter/gather list.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_map_sg function.
*/
static int
ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
int direction)
{
DBG_RUN(KERN_WARNING __FUNCTION__ " : unmapping %d entries\n", nents);
while (nents) {
ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
nents--;
sglist++;
struct ioc *ioc;
int coalesced, filled = 0;
unsigned long flags;
unsigned long hint = hint_lookup[direction];
ASSERT(dev);
ASSERT(dev->sysdata);
ASSERT(HBA_DATA(dev->sysdata)->iommu);
ioc = GET_IOC(dev);
DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist) = ccio_map_single(dev,
(void *)sg_virt_addr(sglist), sglist->length,
direction);
sg_dma_len(sglist) = sglist->length;
return 1;
}
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
ioc->msg_calls++;
#endif
/*
** First coalesce the chunks and allocate I/O pdir space
**
** If this is one DMA stream, we can properly map using the
** correct virtual address associated with each DMA page.
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two-pass algorithm.
*/
coalesced = ccio_coalesce_chunks(ioc, sglist, nents);
/*
** Program the I/O Pdir
**
** map the virtual addresses to the I/O Pdir
** o dma_address will contain the pdir index
** o dma_len will contain the number of bytes to map
** o page/offset contain the virtual address.
*/
filled = ccio_fill_pdir(ioc, sglist, nents, hint);
spin_unlock_irqrestore(&ioc->res_lock, flags);
ASSERT(coalesced == filled);
DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
return filled;
}
/**
* ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
* @dev: The PCI device.
* @sglist: The scatter/gather list to be unmapped from the IOMMU.
* @nents: The number of entries in the scatter/gather list.
* @direction: The direction of the DMA transaction (to/from device).
*
* This function implements the pci_unmap_sg function.
*/
static void
ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
int direction)
{
struct ioc *ioc;
ASSERT(dev);
ASSERT(dev->sysdata);
ASSERT(HBA_DATA(dev->sysdata)->iommu);
ioc = GET_IOC(dev);
DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
#ifdef CONFIG_PROC_FS
ioc->usg_calls++;
#endif
while(sg_dma_len(sglist) && nents--) {
#ifdef CONFIG_PROC_FS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
ccio_unmap_single(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction);
++sglist;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
}
static struct pci_dma_ops ccio_ops = {
ccio_dma_supported,
ccio_alloc_consistent,
ccio_free_consistent,
ccio_map_single,
ccio_unmap_single,
ccio_map_sg,
ccio_unmap_sg,
NULL, /* dma_sync_single : NOP for U2/Uturn */
NULL, /* dma_sync_sg : ditto */
};
#ifdef CONFIG_PROC_FS
static int proc_append(char *src, int len, char **dst, off_t *offset, int *max)
{
if (len < *offset) {
*offset -= len;
return 0;
}
if (*offset > 0) {
src += *offset;
len -= *offset;
*offset = 0;
}
if (len > *max) {
len = *max;
}
memcpy(*dst, src, len);
*dst += len;
*max -= len;
return (*max == 0);
}
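/*
** proc_append() copies at most *max bytes of src into *dst after
** skipping the first *offset bytes, advancing *dst and shrinking
** *max as it goes. It returns nonzero once the output buffer is
** full so the caller can stop generating lines.
*/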
static int ccio_proc_info(char *buf, char **start, off_t offset, int count,
int *eof, void *data)
{
int max = count;
char tmp[80]; /* width of an ANSI-standard terminal */
struct ioc *ioc = ioc_list;
while (ioc != NULL) {
unsigned int total_pages = ioc->res_size << 3;
unsigned long avg = 0, min, max;
int j, len;
len = sprintf(tmp, "%s\n", ioc->name);
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "Cujo 2.0 bug : %s\n",
(ioc->cujo20_bug ? "yes" : "no"));
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "IO PDIR size : %d bytes (%d entries)\n",
total_pages * 8, total_pages);
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "IO PDIR entries : %ld free %ld used (%d%%)\n",
total_pages - ioc->used_pages, ioc->used_pages,
(int)(ioc->used_pages * 100 / total_pages));
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "Resource bitmap : %d bytes (%d pages)\n",
ioc->res_size, total_pages);
if (proc_append(tmp, len, &buf, &offset, &count))
break;
min = max = ioc->avg_search[0];
for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
avg += ioc->avg_search[j];
if(ioc->avg_search[j] > max)
max = ioc->avg_search[j];
if(ioc->avg_search[j] < min)
min = ioc->avg_search[j];
}
avg /= CCIO_SEARCH_SAMPLE;
len = sprintf(tmp, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
min, avg, max);
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
ioc->msingle_calls, ioc->msingle_pages,
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
if (proc_append(tmp, len, &buf, &offset, &count))
break;
/* KLUGE - unmap_sg calls unmap_single for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
max = ioc->usingle_pages - ioc->usg_pages;
len = sprintf(tmp, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
min, max, (int)((max * 1000)/min));
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
ioc->msg_calls, ioc->msg_pages,
(int)((ioc->msg_pages * 1000)/ioc->msg_calls));
if (proc_append(tmp, len, &buf, &offset, &count))
break;
len = sprintf(tmp, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
ioc->usg_calls, ioc->usg_pages,
(int)((ioc->usg_pages * 1000)/ioc->usg_calls));
if (proc_append(tmp, len, &buf, &offset, &count))
break;
ioc = ioc->next;
}
if (count == 0) {
*eof = 1;
}
return (max - count);
}
static int ccio_resource_map(char *buf, char **start, off_t offset, int len,
int *eof, void *data)
{
struct ioc *ioc = ioc_list;
buf[0] = '\0';
while (ioc != NULL) {
u32 *res_ptr = (u32 *)ioc->res_map;
int j;
for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
if ((j & 7) == 0)
strcat(buf,"\n ");
sprintf(buf, "%s %08x", buf, *res_ptr);
res_ptr++;
}
strcat(buf, "\n\n");
ioc = ioc->next;
break; /* XXX - remove me */
}
return strlen(buf);
}
#endif
/**
* ccio_find_ioc - Find the ioc in the ioc_list
* @hw_path: The hardware path of the ioc.
*
* This function searches the ioc_list for an ioc that matches
* the provided hardware path.
*/
static struct ioc * ccio_find_ioc(int hw_path)
{
int i;
struct ioc *ioc;
ioc = ioc_list;
for (i = 0; i < ioc_count; i++) {
if (ioc->hw_path == hw_path)
return ioc;
ioc = ioc->next;
}
return NULL;
}
/**
* ccio_get_iommu - Find the iommu which controls this device
* @dev: The parisc device.
*
* This function searches through the registered IOMMUs and returns the
* appropriate IOMMU for the device based upon the device's hardware path.
*/
void * ccio_get_iommu(const struct parisc_device *dev)
{
dev = find_pa_parent_type(dev, HPHW_IOA);
if (!dev)
return NULL;
return ccio_find_ioc(dev->hw_path);
}
#define CUJO_20_STEP 0x10000000 /* inc upper nibble */
/* Cujo 2.0 has a bug which will silently corrupt data being transferred
* to/from certain pages. To avoid this happening, we mark these pages
* as `used', and ensure that nothing will try to allocate from them.
*/
void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp)
{
unsigned int idx;
struct ioc *ioc = ccio_get_iommu(dev);
u8 *res_ptr;
#ifdef CONFIG_PROC_FS
ioc->cujo20_bug = 1;
#endif
res_ptr = ioc->res_map;
idx = PDIR_INDEX(iovp) >> 3;
while (idx < ioc->res_size) {
res_ptr[idx] |= 0xff;
idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
}
}
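/*
** Illustration (assuming PDIR_INDEX(iovp) == iovp >> IOVP_SHIFT and
** IOVP_SHIFT == 12): each res_map byte covers 8 pdir entries, so
** "res_ptr[idx] |= 0xff" reserves 8 I/O pages at a time, and the
** increment of PDIR_INDEX(CUJO_20_STEP) >> 3 == 0x2000 bytes repeats
** that reservation every 256MB of IOVA space.
*/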
#if 0
/* GRANT - is this needed for U2 or not? */
** I think only Java (K/D/R-class too?) systems don't do this.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
if (dev->spa_shift == 0) {
panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
}
return (1 << dev->spa_shift);
}
#else
/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT 8
#define CCIO_CHAINID_MASK 0xff
#endif /* 0 */
/**
* ccio_ioc_init - Initialize the I/O Controller
* @ioc: The I/O Controller.
*
* Initialize the I/O Controller which includes setting up the
* I/O Page Directory, the resource map, and initializing the
* U2/Uturn chip into virtual mode.
*/
static void
ccio_ioc_init(struct ioc *ioc)
{
int i, iov_order;
u32 iova_space_size;
unsigned long physmem;
/*
** Determine IOVA Space size from memory size.
** Using "mem_max" is a kluge.
**
** Ideally, PCI drivers would register the maximum number
** of DMA they can have outstanding for each device they
** own. Next best thing would be to guess how much DMA
** can be outstanding based on PCI Class/sub-class. Both
** methods still require some "extra" to support PCI
** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
*/
/* limit IOVA space size to 1MB-1GB */
physmem = num_physpages << PAGE_SHIFT;
if(physmem < (ccio_mem_ratio * 1024 * 1024)) {
iova_space_size = 1024 * 1024;
#ifdef __LP64__
} else if(physmem > (ccio_mem_ratio * 512 * 1024 * 1024)) {
iova_space_size = 512 * 1024 * 1024;
#endif
} else {
iova_space_size = (u32)(physmem / ccio_mem_ratio);
}
/*
** this is the case under linux."
*/
iov_order = get_order(iova_space_size) >> (IOVP_SHIFT - PAGE_SHIFT);
ASSERT(iov_order <= (30 - IOVP_SHIFT)); /* iova_space_size <= 1GB */
ASSERT(iov_order >= (20 - IOVP_SHIFT)); /* iova_space_size >= 1MB */
iova_space_size = 1 << (iov_order + IOVP_SHIFT);
ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
ASSERT(ioc->pdir_size < 4 * 1024 * 1024); /* max pdir size < 4MB */
/* Verify it's a power of two */
ASSERT((1 << get_order(ioc->pdir_size)) == (ioc->pdir_size >> PAGE_SHIFT));
DBG_INIT(__FUNCTION__ " hpa 0x%p mem %dMB IOV %dMB (%d bits)\n PDIR size 0x%0x",
ioa->ccio_hpa, (int) (mem_max>>20), iova_space_size>>20,
iov_order + PAGE_SHIFT, pdir_size);
DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits) PDIR size 0x%0x",
__FUNCTION__, ioc->ioc_hpa, physmem>>20, iova_space_size>>20,
iov_order + PAGE_SHIFT, ioc->pdir_size);
ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
}
memset(ioc->pdir_base, 0, ioc->pdir_size);
ASSERT((((unsigned long)ioc->pdir_base) & PAGE_MASK) == (unsigned long)ioc->pdir_base);
DBG_INIT(" base %p", ioc->pdir_base);
/* resource map size dictated by pdir_size */
ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->res_size));
if(NULL == ioc->res_map) {
panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__);
}
memset(ioc->res_map, 0, ioc->res_size);
/* Initialize the res_hint to 16 */
ioc->res_hint = 16;
DBG_INIT(" base %p", pdir_base);
/* Initialize the spinlock */
spin_lock_init(&ioc->res_lock);
/*
** Chainid is the uppermost bits of an IOVP used to determine
** which TLB entry an IOVP will use.
*/
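/*
** Worked example (assuming PAGE_SHIFT == 12): for a 256MB IOVA space,
** get_order(256MB) == 16, so chainid_shift == 16 + 12 - 8 == 20 and
** IOVP bits [27:20] select one of the 256 TLB chains.
*/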
ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
/*
** Initialize IOA hardware
*/
WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
&ioc->ioc_hpa->io_chain_id_mask);
WRITE_U32(virt_to_phys(ioc->pdir_base),
&ioc->ioc_hpa->io_pdir_base);
/*
** Go to "Virtual Mode"
*/
WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_hpa->io_control);
/*
** Initialize all I/O TLB entries to 0 (Valid bit off).
*/
WRITE_U32(0, &ioc->ioc_hpa->io_tlb_entry_m);
WRITE_U32(0, &ioc->ioc_hpa->io_tlb_entry_l);
for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
&ioc->ioc_hpa->io_command);
}
}
static void
ccio_init_resource(struct resource *res, char *name, unsigned long ioaddr)
{
int result;
res->flags = IORESOURCE_MEM;
res->start = (unsigned long)(signed) __raw_readl(ioaddr) << 16;
res->end = (unsigned long)(signed) (__raw_readl(ioaddr + 4) << 16) - 1;
if (res->end + 1 == res->start)
return;
res->name = name;
result = request_resource(&iomem_resource, res);
if (result < 0) {
printk(KERN_ERR "%s: failed to claim CCIO bus address space (%08lx,%08lx)\n",
__FILE__, res->start, res->end);
}
}
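/*
** The <<16 above rebuilds a byte address from the register value,
** i.e. io_io_low/io_io_high hold bits 31:16 of the range. For example,
** a raw io_io_low of 0xf000 yields res->start == 0xfffffffff0000000
** once the signed 32-bit result is sign-extended on a 64-bit kernel.
*/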
static void __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
sprintf(name, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, (unsigned long)&ioc->ioc_hpa->io_io_low);
ccio_init_resource(res + 1, name,
(unsigned long)&ioc->ioc_hpa->io_io_low_hv);
}
static void expand_ioc_area(struct ioc *ioc, unsigned long size,
unsigned long min, unsigned long max, unsigned long align)
{
#ifdef NASTY_HACK_FOR_K_CLASS
__raw_writel(0xfffff600, (unsigned long)&(ioc->ioc_hpa->io_io_high));
ioc->mmio_region[0].end = 0xf5ffffff;
#endif
}
static struct resource *ccio_get_resource(struct ioc* ioc,
const struct parisc_device *dev)
{
if (!ioc) {
return &iomem_resource;
} else if ((ioc->mmio_region->start <= dev->hpa) &&
(dev->hpa < ioc->mmio_region->end)) {
return ioc->mmio_region;
} else if (((ioc->mmio_region + 1)->start <= dev->hpa) &&
(dev->hpa < (ioc->mmio_region + 1)->end)) {
return ioc->mmio_region + 1;
} else {
return NULL;
}
}
printk(MODULE_NAME ": Cujo 2.0 bug needs a work around\n");
ccio_cujo_bug = 1;
int ccio_allocate_resource(const struct parisc_device *dev,
struct resource *res, unsigned long size,
unsigned long min, unsigned long max, unsigned long align,
void (*alignf)(void *, struct resource *, unsigned long, unsigned long),
void *alignf_data)
{
struct ioc *ioc = ccio_get_iommu(dev);
struct resource *parent = ccio_get_resource(ioc, dev);
if (!parent)
return -EBUSY;
if (!allocate_resource(parent, res, size, min, max, align, alignf,
alignf_data))
return 0;
expand_ioc_area(ioc, size, min, max, align);
return allocate_resource(parent, res, size, min, max, align, alignf,
alignf_data);
}
int ccio_request_resource(const struct parisc_device *dev,
struct resource *res)
{
struct ioc *ioc = ccio_get_iommu(dev);
struct resource *parent = ccio_get_resource(ioc, dev);
sprintf(buf, "%sIO pdir size : %d bytes (%d entries)\n",
buf, ((ioa->res_size << 3) * sizeof(u64)), /* 8 bits per byte */
ioa->res_size << 3); /* 8 bits per byte */
return request_resource(parent, res);
}
/**
* ccio_probe - Determine if ccio should claim this device.
* @dev: The device which has been found
*
* Determine if ccio should claim this chip (return 0) or not (return 1).
* If so, initialize the chip and tell other partners in crime they
* have work to do.
*/
static int ccio_probe(struct parisc_device *dev)
{
int i;
struct ioc *ioc, **ioc_p = &ioc_list;
sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
buf, ioa->res_size, ioa->res_size << 3); /* 8 bits per byte */
ioc = kmalloc(sizeof(struct ioc), GFP_KERNEL);
if (ioc == NULL) {
printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
return 1;
}
memset(ioc, 0, sizeof(struct ioc));
strcat(buf, " total: free: used: % used:\n");
sprintf(buf, "%sblocks %8d %8ld %8ld %8ld%%\n", buf, ioa->res_size,
ioa->res_size - ccio_used_bytes, ccio_used_bytes,
(ccio_used_bytes * 100) / ioa->res_size);
ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
sprintf(buf, "%spages %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
total_pages - ccio_used_pages, ccio_used_pages,
(ccio_used_pages * 100 / total_pages));
printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa);
sprintf(buf, "%sconsistent %8ld %8ld\n", buf,
ccio_alloc_size, ccio_free_size);
for (i = 0; i < ioc_count; i++) {
ioc_p = &(*ioc_p)->next;
}
*ioc_p = ioc;
strcat(buf, "\nResource bitmap:\n");
ioc->hw_path = dev->hw_path;
ioc->ioc_hpa = (struct ioa_registers *)dev->hpa;
ccio_ioc_init(ioc);
ccio_init_resources(ioc);
hppa_dma_ops = &ccio_ops;
if (ioc_count == 0) {
/* XXX: Create separate entries for each ioc */
create_proc_read_entry(MODULE_NAME, S_IRWXU, proc_runway_root,
ccio_proc_info, NULL);
create_proc_read_entry(MODULE_NAME"-bitmap", S_IRWXU,
proc_runway_root, ccio_resource_map, NULL);
}
strcat(buf, "\n");
return strlen(buf);
ioc_count++;
return 0;
}
struct pci_dev * ccio_get_fake(const struct parisc_device *dev)
{
struct ioc *ioc;
printk("%s found %s at 0x%p\n", dri->name, dri->version, d->hpa);
dev = find_pa_parent_type(dev, HPHW_IOA);
if(!dev)
return NULL;
ioc = ccio_find_ioc(dev->hw_path);
if(!ioc)
return NULL;
if(ioc->fake_pci_dev)
return ioc->fake_pci_dev;
ioc->fake_pci_dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
if(ioc->fake_pci_dev == NULL) {
printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
return NULL;
}
memset(ioc->fake_pci_dev, 0, sizeof(struct pci_dev));
ioc->fake_pci_dev->sysdata = kmalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
if(ioc->fake_pci_dev->sysdata == NULL) {
printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
return NULL;
}
HBA_DATA(ioc->fake_pci_dev->sysdata)->iommu = ioc;
return ioc->fake_pci_dev;
}
/* We *can't* support JAVA (T600). Venture there at your own risk. */
static struct parisc_device_id ccio_tbl[] = {
{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
{ 0, }
};
static struct parisc_driver ccio_driver = {
name: "U2/Uturn",
id_table: ccio_tbl,
probe: ccio_probe,
};
/**
* ccio_init - ccio initialization procedure.
*
* Register this driver.
*/
void __init ccio_init(void)
{
register_parisc_driver(&ccio_driver);
}
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC 0x502
#define IS_U2(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == U2_IOA_RUNWAY)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == U2_BC_GSC)) \
)

#define IS_UTURN(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == UTURN_IOA_RUNWAY)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == UTURN_BC_GSC)) \
)
static int ccio_dma_supported( struct pci_dev *dev, u64 mask)
{
if (dev == NULL) {
printk(MODULE_NAME ": EISA/ISA/et al not supported\n");
printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
BUG();
return(0);
}
ccio_unmap_sg,
NULL, /* dma_sync_single : NOP for U2 */
NULL, /* dma_sync_sg : ditto */
};
/*
** Determine if ccio should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int
ccio_probe(struct parisc_device *dev)
{
printk("%s found %s at 0x%p\n", dri->name, dri->version, d->hpa);
printk(KERN_INFO "%s found %s at 0x%lx\n", MODULE_NAME,
dev->id.hversion == U2_BC_GSC ? "U2" : "UTurn",
dev->hpa);
/*
** FIXME - should check U2 registers to verify it's really running
** in "Real Mode".
*/
hppa_dma_ops = &ccio_ops;
return 0;
}
static struct parisc_device_id ccio_tbl[] = {
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, U2_BC_GSC, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, UTURN_BC_GSC, 0xc },
{ 0, }
};
static struct parisc_driver ccio_driver = {
name: "U2/Uturn",
id_table: ccio_tbl,
probe: ccio_probe,
};
void __init ccio_init(void)
{
register_parisc_driver(&ccio_driver);
}
/*
** DINO manager
**
** (c) Copyright 1999 Red Hat Software
** (c) Copyright 1999 SuSE GmbH
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This module provides access to Dino PCI bus (config/IOport spaces)
** and helps manage Dino IRQ lines.
**
** Dino interrupt handling is a bit complicated.
** Dino always writes to the broadcast EIR via irr0 for now.
** (BIG WARNING: using broadcast EIR is a really bad thing for SMP!)
** Only one processor interrupt is used for the 11 IRQ line
** inputs to dino.
**
** The difference between Built-in Dino and Card-Mode
** dino is in chip initialization and pci device initialization.
**
** Linux drivers can only use Card-Mode Dino if the PCI devices' I/O port
** BARs are configured and used by the driver. Programming MMIO addresses
** requires substantial knowledge of available Host I/O address ranges
** and is currently not supported. Port/Config accessor functions are the
** same. "BIOS" differences are handled within the existing routines.
*/
/* Changes :
** 2001-06-14 : Clement Moyroud (moyroudc@esiee.fr)
** - added support for the integrated RS232.
*/
/*
** TODO: create a virtual address for each Dino HPA.
** GSC code might be able to do this since IODC data tells us
** how many pages are used. PCI subsystem could (must?) do this
** for PCI drivers devices which implement/use MMIO registers.
*/
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h> /* for struct irqaction */
#include <linux/spinlock.h> /* for spinlock_t and prototypes */
#include <asm/pdc.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include "gsc.h"
#undef DINO_DEBUG
#ifdef DINO_DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/*
** Config accessor functions only pass in the 8-bit bus number
** and not the 8-bit "PCI Segment" number. Each Dino will be
** assigned a PCI bus number based on "when" it's discovered.
**
** The "secondary" bus number is set to this before calling
** pci_scan_bus(). If any PPB's are present, the scan will
** discover them and update the "secondary" and "subordinate"
** fields in Dino's pci_bus structure.
**
** Changes in the configuration *will* result in a different
** bus number for each dino.
*/
#define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA)
#define DINO_IAR0 0x004
#define DINO_IODC_ADDR 0x008
#define DINO_IODC_DATA_0 0x008
#define DINO_IODC_DATA_1 0x008
#define DINO_IRR0 0x00C
#define DINO_IAR1 0x010
#define DINO_IRR1 0x014
#define DINO_IMR 0x018
#define DINO_IPR 0x01C
#define DINO_TOC_ADDR 0x020
#define DINO_ICR 0x024
#define DINO_ILR 0x028
#define DINO_IO_COMMAND 0x030
#define DINO_IO_STATUS 0x034
#define DINO_IO_CONTROL 0x038
#define DINO_IO_GSC_ERR_RESP 0x040
#define DINO_IO_ERR_INFO 0x044
#define DINO_IO_PCI_ERR_RESP 0x048
#define DINO_IO_FBB_EN 0x05c
#define DINO_IO_ADDR_EN 0x060
#define DINO_PCI_ADDR 0x064
#define DINO_CONFIG_DATA 0x068
#define DINO_IO_DATA 0x06c
#define DINO_MEM_DATA 0x070 /* Dino 3.x only */
#define DINO_GSC2X_CONFIG 0x7b4
#define DINO_GMASK 0x800
#define DINO_PAMR 0x804
#define DINO_PAPR 0x808
#define DINO_DAMODE 0x80c
#define DINO_PCICMD 0x810
#define DINO_PCISTS 0x814
#define DINO_MLTIM 0x81c
#define DINO_BRDG_FEAT 0x820
#define DINO_PCIROR 0x824
#define DINO_PCIWOR 0x828
#define DINO_TLTIM 0x830
#define DINO_IRQS 11 /* bits 0-10 are architected */
#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
#define DINO_MASK_IRQ(x) (1<<(x))
#define PCIINTA 0x001
#define PCIINTB 0x002
#define PCIINTC 0x004
#define PCIINTD 0x008
#define PCIINTE 0x010
#define PCIINTF 0x020
#define GSCEXTINT 0x040
/* #define xxx 0x080 - bit 7 is "default" */
/* #define xxx 0x100 - bit 8 not used */
/* #define xxx 0x200 - bit 9 not used */
#define RS232INT 0x400
struct dino_device
{
struct pci_hba_data hba; /* 'C' inheritance - must be first */
spinlock_t dinosaur_pen;
unsigned long txn_addr; /* EIR addr to generate interrupt */
u32 txn_data; /* EIR data assign to each dino */
int irq; /* Virtual IRQ dino uses */
struct irq_region *dino_region; /* region for this Dino */
u32 imr; /* IRQ's which are enabled */
#ifdef DINO_DEBUG
unsigned int dino_irr0; /* save most recent IRQ line stat */
#endif
};
/* Looks nice and keeps the compiler happy */
#define DINO_DEV(d) ((struct dino_device *) d)
/*
* Dino Configuration Space Accessor Functions
*/
#define DINO_CFG_TOK(bus,dfn,pos) ((u32) ((bus)<<16 | (dfn)<<8 | (pos)))
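/*
 * Example (illustrative): bus 0, slot 4, function 0 (devfn 0x20),
 * register 0x10 (BAR0) encodes as DINO_CFG_TOK(0, 0x20, 0x10) ==
 * 0x00002010; the token is then written to DINO_PCI_ADDR.
 */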
static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct dino_device *d = DINO_DEV(bus->sysdata);
u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
unsigned long base_addr = d->hba.base_addr;
unsigned long flags;
spin_lock_irqsave(&d->dinosaur_pen, flags);
/* tell HW which CFG address */
gsc_writel(v, base_addr + DINO_PCI_ADDR);
/* generate cfg read cycle */
if (size == 1) {
*val = gsc_readb(base_addr + DINO_CONFIG_DATA + (where & 3));
} else if (size == 2) {
*val = le16_to_cpu(gsc_readw(base_addr +
DINO_CONFIG_DATA + (where & 2)));
} else if (size == 4) {
*val = le32_to_cpu(gsc_readl(base_addr + DINO_CONFIG_DATA));
}
spin_unlock_irqrestore(&d->dinosaur_pen, flags);
return 0;
}
/*
* Dino address stepping "feature":
* When address stepping, Dino attempts to drive the bus one cycle too soon
* even though the type of cycle (config vs. MMIO) might be different.
* The read of Ven/Prod ID is harmless and avoids Dino's address stepping.
*/
static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct dino_device *d = DINO_DEV(bus->sysdata);
u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
unsigned long base_addr = d->hba.base_addr;
unsigned long flags;
spin_lock_irqsave(&d->dinosaur_pen, flags);
/* avoid address stepping feature */
gsc_writel(v & 0xffffff00, base_addr + DINO_PCI_ADDR);
gsc_readl(base_addr + DINO_CONFIG_DATA);
/* tell HW which CFG address */
gsc_writel(v, base_addr + DINO_PCI_ADDR);
/* generate cfg write cycle */
if (size == 1) {
gsc_writeb(val, base_addr + DINO_CONFIG_DATA + (where & 3));
} else if (size == 2) {
gsc_writew(cpu_to_le16(val),
base_addr + DINO_CONFIG_DATA + (where & 2));
} else if (size == 4) {
gsc_writel(cpu_to_le32(val), base_addr + DINO_CONFIG_DATA);
}
spin_unlock_irqrestore(&d->dinosaur_pen, flags);
return 0;
}
static struct pci_ops dino_cfg_ops = {
.read = dino_cfg_read,
.write = dino_cfg_write,
};
/*
* Dino "I/O Port" Space Accessor Functions
*
* Many PCI devices don't require use of I/O port space (eg Tulip,
* NCR720) since they export the same registers to both MMIO and
* I/O port space. Performance is going to stink if drivers use
* I/O port instead of MMIO.
*/
#define cpu_to_le8(x) (x)
#define le8_to_cpu(x) (x)
#define DINO_PORT_IN(type, size, mask) \
static u##size dino_in##size (struct pci_hba_data *d, u16 addr) \
{ \
u##size v; \
unsigned long flags; \
spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
/* tell HW which IO Port address */ \
gsc_writel((u32) addr & ~3, d->base_addr + DINO_PCI_ADDR); \
/* generate I/O PORT read cycle */ \
v = gsc_read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \
spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
return le##size##_to_cpu(v); \
}
DINO_PORT_IN(b, 8, 3)
DINO_PORT_IN(w, 16, 2)
DINO_PORT_IN(l, 32, 0)
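/*
 * The three expansions above define dino_in8/dino_in16/dino_in32.
 * The mask selects the byte/halfword lane within the 32-bit
 * DINO_IO_DATA window, e.g. dino_in8(d, 0x3f6) reads from
 * base_addr + DINO_IO_DATA + (0x3f6 & 3).
 */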
#define DINO_PORT_OUT(type, size, mask) \
static void dino_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
unsigned long flags; \
spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
/* tell HW which IO Port address */ \
gsc_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
/* generate I/O PORT write cycle */ \
gsc_write##type(cpu_to_le##size(val), d->base_addr+DINO_IO_DATA+(addr&mask)); \
spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
}
DINO_PORT_OUT(b, 8, 3)
DINO_PORT_OUT(w, 16, 2)
DINO_PORT_OUT(l, 32, 0)
struct pci_port_ops dino_port_ops = {
inb: dino_in8,
inw: dino_in16,
inl: dino_in32,
outb: dino_out8,
outw: dino_out16,
outl: dino_out32
};
static void
dino_mask_irq(void *irq_dev, int irq)
{
struct dino_device *dino_dev = DINO_DEV(irq_dev);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, irq_dev, irq);
if (NULL == irq_dev || irq > DINO_IRQS || irq < 0) {
printk(KERN_WARNING "%s(0x%lx, %d) - not a dino irq?\n",
__FUNCTION__, (long) irq_dev, irq);
BUG();
} else {
/*
** Clear the matching bit in the IMR register
*/
dino_dev->imr &= ~(DINO_MASK_IRQ(irq));
gsc_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
}
static void
dino_unmask_irq(void *irq_dev, int irq)
{
struct dino_device *dino_dev = DINO_DEV(irq_dev);
u32 tmp;
DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, irq_dev, irq);
if (NULL == irq_dev || irq > DINO_IRQS) {
printk(KERN_WARNING "%s(): %d not a dino irq?\n",
__FUNCTION__, irq);
BUG();
return;
}
/* set the matching bit in the IMR register */
dino_dev->imr |= DINO_MASK_IRQ(irq); /* used in dino_isr() */
gsc_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
/* Emulate "Level Triggered" Interrupt
** Basically, a driver is blowing it if the IRQ line is asserted
** while the IRQ is disabled. But tulip.c seems to do that....
** Give 'em a kluge award and a nice round of applause!
**
** The gsc_write will generate an interrupt which invokes dino_isr().
** dino_isr() will read IPR and find nothing. But then catch this
** when it also checks ILR.
*/
tmp = gsc_readl(dino_dev->hba.base_addr+DINO_ILR);
if (tmp & DINO_MASK_IRQ(irq)) {
DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
__FUNCTION__, tmp);
gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
}
}
static void
dino_enable_irq(void *irq_dev, int irq)
{
struct dino_device *dino_dev = DINO_DEV(irq_dev);
/*
** clear pending IRQ bits
**
** This does NOT change ILR state!
** See comments in dino_unmask_irq() for ILR usage.
*/
gsc_readl(dino_dev->hba.base_addr+DINO_IPR);
dino_unmask_irq(irq_dev, irq);
}
static struct irq_region_ops dino_irq_ops = {
disable_irq: dino_mask_irq, /* ??? */
enable_irq: dino_enable_irq,
mask_irq: dino_mask_irq,
unmask_irq: dino_unmask_irq
};
/*
* Handle a Processor interrupt generated by Dino.
*
* ilr_loop counter is a kluge to prevent a "stuck" IRQ line from
* wedging the CPU. Could be removed or made optional at some point.
*/
static void
dino_isr(int irq, void *intr_dev, struct pt_regs *regs)
{
struct dino_device *dino_dev = DINO_DEV(intr_dev);
u32 mask;
int ilr_loop = 100;
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
/* read and acknowledge pending interrupts */
#ifdef DINO_DEBUG
dino_dev->dino_irr0 =
#endif
mask = gsc_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK;
ilr_again:
while (mask)
{
int irq;
/*
* Perform a binary search on set bits.
* `Less than Fatal' and PS2 interrupts aren't supported.
*/
if (mask & 0xf) {
if (mask & 0x3) {
irq = (mask & 0x1) ? 0 : 1; /* PCI INT A, B */
} else {
irq = (mask & 0x4) ? 2 : 3; /* PCI INT C, D */
}
} else {
if (mask & 0x30) {
irq = (mask & 0x10) ? 4 : 5; /* PCI INT E, F */
} else {
irq = (mask & 0x40) ? 6 : 10; /* GSC, RS232 */
}
}
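/* Illustration: a latched mask of 0x24 resolves to irq 2 (PCI INT C)
 * on the first pass and irq 5 (PCI INT F) on the second. */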
mask &= ~(1<<irq);
DBG(KERN_WARNING "%s(%x, %p) mask %0x\n",
__FUNCTION__, irq, intr_dev, mask);
do_irq(&dino_dev->dino_region->action[irq],
dino_dev->dino_region->data.irqbase + irq,
regs);
}
/* Support for level triggered IRQ lines.
**
** Dropping this support would make this routine *much* faster.
** But since PCI requires level triggered IRQ line to share lines...
** device drivers may assume lines are level triggered (and not
** edge triggered like EISA/ISA can be).
*/
mask = gsc_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr;
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
printk("Dino %lx: stuck interrupt %d\n", dino_dev->hba.base_addr, mask);
}
}
static int dino_choose_irq(struct parisc_device *dev)
{
int irq = -1;
switch (dev->id.sversion) {
case 0x00084: irq = 8; break; /* PS/2 */
case 0x0008c: irq = 10; break; /* RS232 */
case 0x00096: irq = 8; break; /* PS/2 */
}
return irq;
}
static void __init
dino_bios_init(void)
{
DBG("dino_bios_init\n");
}
/*
* dino_card_setup - Set up the memory space for a Dino in card mode.
* @bus: the bus under this dino
*
* Claim an 8MB chunk of unused IO space and call the generic PCI routines
* to set up the addresses of the devices on this bus.
*/
#define _8MB 0x00800000UL
static void __init
dino_card_setup(struct pci_bus *bus, unsigned long base_addr)
{
int i;
struct dino_device *dino_dev = DINO_DEV(bus->sysdata);
struct resource *res;
res = &dino_dev->hba.lmmio_space;
res->flags = IORESOURCE_MEM;
if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB,
(unsigned long) 0xfffffffff0000000UL | _8MB,
0xffffffffffffffffUL &~ _8MB, _8MB,
NULL, NULL) < 0) {
printk(KERN_WARNING "Dino: Failed to allocate memory region\n");
return;
}
bus->resource[1] = res;
bus->resource[0] = &(dino_dev->hba.io_space);
/* Now tell dino what range it has */
for (i = 1; i < 31; i++) {
if (res->start == (0xfffffffff0000000UL | i * _8MB))
break;
}
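/* The loop above recovers the index i of the 8MB window the
 * allocator chose; bit i of DINO_IO_ADDR_EN enables decoding
 * for exactly that window. */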
gsc_writel(1 << i, base_addr + DINO_IO_ADDR_EN);
pcibios_assign_unassigned_resources(bus);
}
static void __init
dino_card_fixup(struct pci_dev *dev)
{
u32 irq_pin;
/*
** REVISIT: card-mode PCI-PCI expansion chassis do exist.
** Not sure they were ever productized.
** Die here since we'll die later in dino_inb() anyway.
*/
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
panic("Card-Mode Dino: PCI-PCI Bridge not supported\n");
}
/*
** Set Latency Timer to 0xff (not a shared bus)
** Set CACHELINE_SIZE.
*/
dino_cfg_write(dev->bus, dev->devfn, PCI_CACHE_LINE_SIZE, 16, 0xff00 | L1_CACHE_BYTES/4);
/*
** Program INT_LINE for card-mode devices.
** The cards are hardwired according to this algorithm.
** And it doesn't matter if PPB's are present or not since
** the IRQ lines bypass the PPB.
**
** "-1" converts INTA-D (1-4) to PCIINTA-D (0-3) range.
** The additional "-1" adjusts for skewing the IRQ<->slot.
*/
dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 8, &irq_pin);
dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
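/* e.g. slot 2, INTA (irq_pin 1): dev->irq = (1 + 2 - 1) % 4 == 2,
 * i.e. PCIINTC. */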
/* Shouldn't really need to do this but it's in case someone tries
** to bypass PCI services and look at the card themselves.
*/
dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 8, dev->irq);
}
static void __init
dino_fixup_bus(struct pci_bus *bus)
{
struct list_head *ln;
struct pci_dev *dev;
struct dino_device *dino_dev = DINO_DEV(bus->sysdata);
int port_base = HBA_PORT_BASE(dino_dev->hba.hba_num);
DBG(KERN_WARNING "%s(0x%p) bus %d sysdata 0x%p\n",
__FUNCTION__, bus, bus->secondary, bus->sysdata);
/* Firmware doesn't set up card-mode dino, so we have to */
if (is_card_dino(&dino_dev->hba.dev->id))
dino_card_setup(bus, dino_dev->hba.base_addr);
/* If this is a PCI-PCI Bridge, read the window registers etc */
if (bus->self)
pci_read_bridge_bases(bus);
list_for_each(ln, &bus->devices) {
int i;
dev = pci_dev_b(ln);
if (is_card_dino(&dino_dev->hba.dev->id))
dino_card_fixup(dev);
/*
** P2PB's only have 2 BARs, no IRQs.
** I'd like to just ignore them for now.
*/
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
continue;
/* Adjust the I/O Port space addresses */
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if (res->flags & IORESOURCE_IO) {
res->start |= port_base;
res->end |= port_base;
}
#ifdef __LP64__
/* Sign Extend MMIO addresses */
else if (res->flags & IORESOURCE_MEM) {
res->start |= 0xffffffff00000000UL;
res->end |= 0xffffffff00000000UL;
}
#endif
}
/* Adjust INT_LINE for this bus's IRQ region */
dev->irq = dino_dev->dino_region->data.irqbase + dev->irq;
}
}
struct pci_bios_ops dino_bios_ops = {
.init = dino_bios_init,
.fixup_bus = dino_fixup_bus
};
/*
* Initialise a DINO controller chip
*/
static void __init
dino_card_init(struct dino_device *dino_dev)
{
u32 brdg_feat = 0x00784e05;
gsc_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK);
gsc_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN);
gsc_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR);
#if 1
/* REVISIT - should be a runtime check (eg if (CPU_IS_PCX_L) ...) */
/*
** PCX-L processors don't support XQL like Dino wants it.
** PCX-L2 ignore XQL signal and it doesn't matter.
*/
brdg_feat &= ~0x4; /* UXQL */
#endif
gsc_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT);
/*
** Don't enable address decoding until we know which I/O range
** currently is available from the host. Only affects MMIO
** and not I/O port space.
*/
gsc_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN);
gsc_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE);
gsc_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR);
gsc_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR);
gsc_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM);
gsc_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL);
gsc_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM);
/* Disable PAMR before writing PAPR */
gsc_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR);
gsc_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR);
gsc_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR);
/*
** Dino ERS encourages enabling FBB (0x6f).
** We can't until we know *all* devices below us can support it.
** (Something in device configuration header tells us).
*/
gsc_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD);
/* Somewhere, the PCI spec says give devices 1 second
** to recover from the #RESET being de-asserted.
** Experience shows most devices only need 10ms.
** This short-cut speeds up booting significantly.
*/
mdelay(pci_post_reset_delay);
}
static int __init
dino_bridge_init(struct dino_device *dino_dev, const char *name)
{
unsigned long io_addr, bpos;
int result;
struct resource *res;
/*
* Decoding IO_ADDR_EN only works for Built-in Dino
* since PDC has already initialized this.
*/
io_addr = gsc_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN);
if (io_addr == 0) {
printk(KERN_WARNING "%s: No PCI devices enabled.\n", name);
return -ENODEV;
}
for (bpos = 0; (io_addr & (1 << bpos)) == 0; bpos++)
;
res = &dino_dev->hba.lmmio_space;
res->flags = IORESOURCE_MEM;
res->start = (unsigned long)(signed int)(0xf0000000 | (bpos << 23));
res->end = res->start + 8 * 1024 * 1024 - 1;
result = ccio_request_resource(dino_dev->hba.dev, res);
if (result < 0) {
printk(KERN_ERR "%s: failed to claim PCI Bus address space!\n", name);
return result;
}
return 0;
}
static int __init dino_common_init(struct parisc_device *dev,
struct dino_device *dino_dev, const char *name)
{
int status;
u32 eim;
struct gsc_irq gsc_irq;
struct resource *res;
pcibios_register_hba(&dino_dev->hba);
pci_bios = &dino_bios_ops; /* used by pci_scan_bus() */
pci_port = &dino_port_ops;
/*
** Note: SMP systems can make use of IRR1/IAR1 registers
** But it won't buy much performance except in very
** specific applications/configurations. Note Dino
** still only has 11 IRQ input lines - just map some of them
** to a different processor.
*/
dino_dev->irq = gsc_alloc_irq(&gsc_irq);
dino_dev->txn_addr = gsc_irq.txn_addr;
dino_dev->txn_data = gsc_irq.txn_data;
eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
/*
** Dino needs a PA "IRQ" to get a processor's attention.
** arch/parisc/kernel/irq.c returns an EIRR bit.
*/
if (dino_dev->irq < 0) {
printk(KERN_WARNING "%s: gsc_alloc_irq() failed\n", name);
return 1;
}
status = request_irq(dino_dev->irq, dino_isr, 0, name, dino_dev);
if (status) {
printk(KERN_WARNING "%s: request_irq() failed with %d\n",
name, status);
return 1;
}
/*
** Tell generic interrupt support we have 11 bits which need
** be checked in the interrupt handler.
*/
dino_dev->dino_region = alloc_irq_region(DINO_IRQS, &dino_irq_ops,
name, dino_dev);
if (NULL == dino_dev->dino_region) {
printk(KERN_WARNING "%s: alloc_irq_region() failed\n", name);
return 1;
}
/* Support the serial port which is sometimes attached on built-in
* Dino / Cujo chips.
*/
fixup_child_irqs(dev, dino_dev->dino_region->data.irqbase,
dino_choose_irq);
/*
** This enables DINO to generate interrupts when it sees
** any of its inputs *change*. Just asserting an IRQ
** before it's enabled (ie unmasked) isn't good enough.
*/
gsc_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
/*
** Some platforms don't clear Dino's IRR0 register at boot time.
** Reading will clear it now.
*/
gsc_readl(dino_dev->hba.base_addr+DINO_IRR0);
/* allocate I/O Port resource region */
res = &dino_dev->hba.io_space;
if (dev->id.hversion == 0x680 || is_card_dino(&dev->id)) {
res->name = "Dino I/O Port";
dino_dev->hba.lmmio_space.name = "Dino LMMIO";
} else {
res->name = "Cujo I/O Port";
dino_dev->hba.lmmio_space.name = "Cujo LMMIO";
}
res->start = HBA_PORT_BASE(dino_dev->hba.hba_num);
res->end = res->start + (HBA_PORT_SPACE_SIZE - 1);
res->flags = IORESOURCE_IO; /* do not mark it busy ! */
if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed 0x%lx/%lx (hpa 0x%lx)\n",
name, res->start, res->end, dino_dev->hba.base_addr);
return 1;
}
return 0;
}
#define CUJO_RAVEN_ADDR 0xfffffffff1000000UL
#define CUJO_FIREHAWK_ADDR 0xfffffffff1604000UL
#define CUJO_RAVEN_BADPAGE 0x01003000UL
#define CUJO_FIREHAWK_BADPAGE 0x01607000UL
static const char *dino_vers[] = {
"2.0",
"2.1",
"3.0",
"3.1"
};
static const char *cujo_vers[] = {
"1.0",
"2.0"
};
void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
/*
** Determine if dino should claim this chip (return 0) or not (return 1).
** If so, initialize the chip appropriately (card-mode vs bridge mode).
** Much of the initialization is common though.
*/
static int __init
dino_driver_callback(struct parisc_device *dev)
{
struct dino_device *dino_dev; // Dino specific control struct
const char *version = "unknown";
const char *name = "Dino";
int is_cujo = 0;
if (is_card_dino(&dev->id)) {
version = "3.x (card mode)";
} else {
if(dev->id.hversion == 0x680) {
if (dev->id.hversion_rev < 4) {
version = dino_vers[dev->id.hversion_rev];
}
} else {
name = "Cujo";
is_cujo = 1;
if (dev->id.hversion_rev < 2) {
version = cujo_vers[dev->id.hversion_rev];
}
}
}
printk("%s version %s found at 0x%lx\n", name, version, dev->hpa);
if (!request_mem_region(dev->hpa, PAGE_SIZE, name)) {
printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
dev->hpa);
return 1;
}
/* Check for bugs */
if (is_cujo && dev->id.hversion_rev == 1) {
#ifdef CONFIG_IOMMU_CCIO
printk(KERN_WARNING "Enabling Cujo 2.0 bug workaround\n");
if (dev->hpa == (unsigned long)CUJO_RAVEN_ADDR) {
ccio_cujo20_fixup(dev->parent, CUJO_RAVEN_BADPAGE);
} else if (dev->hpa == (unsigned long)CUJO_FIREHAWK_ADDR) {
ccio_cujo20_fixup(dev->parent, CUJO_FIREHAWK_BADPAGE);
} else {
printk("Don't recognise Cujo at address 0x%lx, not enabling workaround\n", dev->hpa);
}
#endif
} else if (!is_cujo && !is_card_dino(&dev->id) &&
dev->id.hversion_rev < 3) {
printk(KERN_WARNING
"The GSCtoPCI (Dino hrev %d) bus converter found may exhibit\n"
"data corruption. See Service Note Numbers: A4190A-01, A4191A-01.\n"
"Systems shipped after Aug 20, 1997 will not exhibit this problem.\n"
"Models affected: C180, C160, C160L, B160L, and B132L workstations.\n\n",
dev->id.hversion_rev);
/* REVISIT: why are C200/C240 listed in the README table but not
** "Models affected"? Could be an omission in the original literature.
*/
}
dino_dev = kmalloc(sizeof(struct dino_device), GFP_KERNEL);
if (!dino_dev) {
printk("dino_init_chip - couldn't alloc dino_device\n");
return 1;
}
memset(dino_dev, 0, sizeof(struct dino_device));
dino_dev->hba.dev = dev;
dino_dev->hba.base_addr = dev->hpa; /* faster access */
dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
dino_dev->dinosaur_pen = SPIN_LOCK_UNLOCKED;
dino_dev->hba.iommu = ccio_get_iommu(dev);
if (is_card_dino(&dev->id)) {
dino_card_init(dino_dev);
} else {
dino_bridge_init(dino_dev, name);
}
if (dino_common_init(dev, dino_dev, name))
return 1;
/*
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
*/
dino_dev->hba.hba_bus = pci_scan_bus(dino_dev->hba.hba_num,
&dino_cfg_ops, dino_dev);
return 0;
}
/*
* Normally, we would just test sversion. But the Elroy PCI adapter has
* the same sversion as Dino, so we have to check hversion as well.
* Unfortunately, the J2240 PDC reports the wrong hversion for the first
* Dino, so we have to test for Dino, Cujo and Dino-in-a-J2240.
*/
static struct parisc_device_id dino_tbl[] = {
{ HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x004, 0x0009D }, /* Card-mode Dino. */
{ HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x444, 0x08080 }, /* Same card in a 715. Bug? */
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x680, 0xa }, /* Bridge-mode Dino */
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x682, 0xa }, /* Bridge-mode Cujo */
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x05d, 0xa }, /* Dino in a J2240 */
{ 0, }
};
static struct parisc_driver dino_driver = {
.name = "Dino",
.id_table = dino_tbl,
.probe = dino_driver_callback,
};
/*
* One time initialization to let the world know Dino is here.
* This is the only routine which is NOT static.
* Must be called exactly once before pci_init().
*/
int __init dino_init(void)
{
register_parisc_driver(&dino_driver);
return 0;
}
/*
* eisa.c - provide support for EISA adapters in PA-RISC machines
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
* Copyright (c) 2001 Daniel Engstrom <5116@telia.com>
*
* There are two distinct EISA adapters. Mongoose is found in machines
* before the 712; then the Wax ASIC is used. To complicate matters, the
* Wax ASIC also includes a PS/2 and RS-232 controller, but those are
* dealt with elsewhere; this file is concerned only with the EISA portions
* of Wax.
*
*
* HINT:
* -----
* To allow an ISA card to work properly in the EISA slot you need to
* set an edge trigger level. This may be done on the palo command line
* by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with
* n and n2 as the irq levels you want to use.
*
* Example: "eisa_irq_edge=10,11" allows ISA cards to operate at
* irq levels 10 and 11.
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/eisa_bus.h>
#if 0
#define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg )
#else
#define EISA_DBG(msg, arg... )
#endif
#define SNAKES_EEPROM_BASE_ADDR 0xF0810400
#define MIRAGE_EEPROM_BASE_ADDR 0xF00C0400
static spinlock_t eisa_irq_lock = SPIN_LOCK_UNLOCKED;
/* We can only have one EISA adapter in the system because neither
* implementation can be flexed.
*/
static struct eisa_ba {
struct pci_hba_data hba;
unsigned long eeprom_addr;
} eisa_dev;
/* Port ops */
static inline unsigned long eisa_permute(unsigned short port)
{
if (port & 0x300) {
return 0xfc000000 | ((port & 0xfc00) >> 6)
| ((port & 0x3f8) << 9) | (port & 7);
} else {
return 0xfc000000 | port;
}
}
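/* Worked example: port 0x3f8 (COM1) has bits within 0x300 set, so it
 * permutes to 0xfc000000 | 0 | (0x3f8 << 9) | 0 == 0xfc07f000, while a
 * low port such as 0x80 maps straight through to 0xfc000080.
 */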
unsigned char eisa_in8(unsigned short port)
{
if (EISA_bus)
return gsc_readb(eisa_permute(port));
return 0xff;
}
unsigned short eisa_in16(unsigned short port)
{
if (EISA_bus)
return le16_to_cpu(gsc_readw(eisa_permute(port)));
return 0xffff;
}
unsigned int eisa_in32(unsigned short port)
{
if (EISA_bus)
return le32_to_cpu(gsc_readl(eisa_permute(port)));
return 0xffffffff;
}
void eisa_out8(unsigned char data, unsigned short port)
{
if (EISA_bus)
gsc_writeb(data, eisa_permute(port));
}
void eisa_out16(unsigned short data, unsigned short port)
{
if (EISA_bus)
gsc_writew(cpu_to_le16(data), eisa_permute(port));
}
void eisa_out32(unsigned int data, unsigned short port)
{
if (EISA_bus)
gsc_writel(cpu_to_le32(data), eisa_permute(port));
}
/* Interrupt handling */
/* cached interrupt mask registers */
static int master_mask;
static int slave_mask;
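/* The two 8259-compatible PICs are cascaded the same way as on a PC:
* IRQs 0-7 live on the master (ports 0x20/0x21), IRQs 8-15 on the
* slave (ports 0xa0/0xa1) hanging off master IR2 -- hence all the
* "irq & 8" tests below. */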
/* The trigger level can be set with the
* eisa_irq_edge=n,n,n command-line parameter.
* We should really read this from the EEPROM
* in the future.
*/
/* irq 13,8,2,1,0 must be edge */
static unsigned int eisa_irq_level; /* default to edge triggered */
/* called by free irq */
static void eisa_disable_irq(void *irq_dev, int irq)
{
unsigned long flags;
EISA_DBG("disable irq %d\n", irq);
/* just mask for now */
spin_lock_irqsave(&eisa_irq_lock, flags);
if (irq & 8) {
slave_mask |= (1 << (irq&7));
eisa_out8(slave_mask, 0xa1);
} else {
master_mask |= (1 << (irq&7));
eisa_out8(master_mask, 0x21);
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
}
/* called by request irq */
static void eisa_enable_irq(void *irq_dev, int irq)
{
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
spin_lock_irqsave(&eisa_irq_lock, flags);
if (irq & 8) {
slave_mask &= ~(1 << (irq&7));
eisa_out8(slave_mask, 0xa1);
} else {
master_mask &= ~(1 << (irq&7));
eisa_out8(master_mask, 0x21);
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
}
static void eisa_mask_irq(void *irq_dev, int irq)
{
unsigned long flags;
EISA_DBG("mask irq %d\n", irq);
/* mask irq */
spin_lock_irqsave(&eisa_irq_lock, flags);
if (irq & 8) {
slave_mask |= (1 << (irq&7));
eisa_out8(slave_mask, 0xa1);
} else {
master_mask |= (1 << (irq&7));
eisa_out8(master_mask, 0x21);
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
}
static void eisa_unmask_irq(void *irq_dev, int irq)
{
unsigned long flags;
EISA_DBG("unmask irq %d\n", irq);
/* unmask */
spin_lock_irqsave(&eisa_irq_lock, flags);
if (irq & 8) {
slave_mask &= ~(1 << (irq&7));
eisa_out8(slave_mask, 0xa1);
} else {
master_mask &= ~(1 << (irq&7));
eisa_out8(master_mask, 0x21);
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
}
static struct irqaction action[IRQ_PER_REGION];
/* EISA needs to be fixed at IRQ region #0 (EISA_IRQ_REGION) */
static struct irq_region eisa_irq_region = {
ops: { eisa_disable_irq, eisa_enable_irq, eisa_mask_irq, eisa_unmask_irq },
data: { name: "EISA", irqbase: 0 },
action: action,
};
static void eisa_irq(int _, void *intr_dev, struct pt_regs *regs)
{
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
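/* Reading this fixed GSC address appears to perform the EISA
* interrupt-acknowledge cycle and return the number of the
* highest-priority pending interrupt (an assumption based on how
* the value is used below). */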
int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */
unsigned long flags;
spin_lock_irqsave(&eisa_irq_lock, flags);
/* read IRR command */
eisa_out8(0x0a, 0x20);
eisa_out8(0x0a, 0xa0);
EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n",
irq, eisa_in8(0x20), eisa_in8(0xa0));
/* read ISR command */
eisa_out8(0x0a, 0x20);
eisa_out8(0x0a, 0xa0);
EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n",
eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1));
irq &= 0xf;
/* mask irq and write eoi */
if (irq & 8) {
slave_mask |= (1 << (irq&7));
eisa_out8(slave_mask, 0xa1);
eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */
eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
} else {
master_mask |= (1 << (irq&7));
eisa_out8(master_mask, 0x21);
eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
do_irq(&eisa_irq_region.action[irq], EISA_IRQ_REGION + irq, regs);
spin_lock_irqsave(&eisa_irq_lock, flags);
/* unmask */
if (irq & 8) {
slave_mask &= ~(1 << (irq&7));
eisa_out8(slave_mask, 0xa1);
} else {
master_mask &= ~(1 << (irq&7));
eisa_out8(master_mask, 0x21);
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
}
static void dummy_irq2_handler(int _, void *dev, struct pt_regs *regs)
{
printk(KERN_ALERT "eisa: uhh, irq2?\n");
}
static void init_eisa_pic(void)
{
unsigned long flags;
spin_lock_irqsave(&eisa_irq_lock, flags);
eisa_out8(0xff, 0x21); /* mask during init */
eisa_out8(0xff, 0xa1); /* mask during init */
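/* Standard 8259 initialization sequence; roughly (from generic 8259
* programming, not verified against HP documentation): ICW1 0x11 =
* edge-triggered, cascaded, ICW4 needed; ICW2 = vector base (unused
* here); ICW3 0x04/0x02 = slave on master IR2; ICW4 0x01 = 8086 mode. */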
/* master pic */
eisa_out8(0x11,0x20); /* ICW1 */
eisa_out8(0x00,0x21); /* ICW2 */
eisa_out8(0x04,0x21); /* ICW3 */
eisa_out8(0x01,0x21); /* ICW4 */
eisa_out8(0x40,0x20); /* OCW2 */
/* slave pic */
eisa_out8(0x11,0xa0); /* ICW1 */
eisa_out8(0x08,0xa1); /* ICW2 */
eisa_out8(0x02,0xa1); /* ICW3 */
eisa_out8(0x01,0xa1); /* ICW4 */
eisa_out8(0x40,0xa0); /* OCW2 */
udelay(100);
slave_mask = 0xff;
master_mask = 0xfb;
eisa_out8(slave_mask, 0xa1); /* OCW1 */
eisa_out8(master_mask, 0x21); /* OCW1 */
/* setup trig level */
EISA_DBG("EISA edge/level %04x\n", eisa_irq_level);
eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */
eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1);
EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0));
EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1));
spin_unlock_irqrestore(&eisa_irq_lock, flags);
}
/* Device initialisation */
#define is_mongoose(dev) (dev->id.sversion == 0x00076)
static int __devinit eisa_probe(struct parisc_device *dev)
{
int result;
char *name = is_mongoose(dev) ? "Mongoose" : "Wax";
printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n",
name, dev->hpa);
eisa_dev.hba.dev = dev;
eisa_dev.hba.iommu = ccio_get_iommu(dev);
eisa_dev.hba.lmmio_space.name = "EISA";
eisa_dev.hba.lmmio_space.start = (unsigned long) 0xfffffffffc000000;
eisa_dev.hba.lmmio_space.end = (unsigned long) 0xffffffffffbfffff;
eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM;
result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space);
if (result < 0) {
printk(KERN_ERR "EISA: failed to claim EISA Bus address space!\n");
return result;
}
eisa_dev.hba.io_space.name = "EISA";
eisa_dev.hba.io_space.start = 0;
eisa_dev.hba.io_space.end = 0xffff;
eisa_dev.hba.io_space.flags = IORESOURCE_IO;
result = request_resource(&ioport_resource, &eisa_dev.hba.io_space);
if (result < 0) {
printk(KERN_ERR "EISA: failed to claim EISA Bus port space!\n");
return result;
}
pcibios_register_hba(&eisa_dev.hba);
result = request_irq(dev->irq, eisa_irq, SA_SHIRQ, "EISA", NULL);
if (result) {
printk(KERN_ERR "EISA: request_irq failed!\n");
return result;
}
/* Reserve IRQ2 */
action[2].handler = dummy_irq2_handler;
action[2].name = "cascade";
eisa_irq_region.data.dev = dev;
irq_region[0] = &eisa_irq_region;
EISA_bus = 1;
if (dev->num_addrs) {
/* newer firmware hands out the eeprom address */
eisa_dev.eeprom_addr = dev->addr[0];
} else {
/* old firmware, need to figure out the box */
if (is_mongoose(dev)) {
eisa_dev.eeprom_addr = SNAKES_EEPROM_BASE_ADDR;
} else {
eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
}
}
eisa_eeprom_init(eisa_dev.eeprom_addr);
eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, &eisa_dev.hba.lmmio_space);
init_eisa_pic();
return 0;
}
static struct parisc_device_id __devinitdata eisa_tbl[] = {
{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00076 }, /* Mongoose */
{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00090 }, /* Wax EISA */
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, eisa_tbl);
static struct parisc_driver eisa_driver = {
name: "EISA Bus Adapter",
id_table: eisa_tbl,
probe: eisa_probe,
};
void __init eisa_init(void)
{
register_parisc_driver(&eisa_driver);
}
static unsigned int eisa_irq_configured;
void eisa_make_irq_level(int num)
{
if (eisa_irq_configured & (1<<num)) {
printk(KERN_WARNING
"IRQ %d polarity configured twice (last to level)\n",
num);
}
eisa_irq_level |= (1<<num); /* set the corresponding bit */
eisa_irq_configured |= (1<<num); /* set the corresponding bit */
}
void eisa_make_irq_edge(int num)
{
if (eisa_irq_configured & (1<<num)) {
printk(KERN_WARNING
"IRQ %d polarity configured twice (last to edge)\n",
num);
}
eisa_irq_level &= ~(1<<num); /* clear the corresponding bit */
eisa_irq_configured |= (1<<num); /* set the corresponding bit */
}
static int __init eisa_irq_setup(char *str)
{
char *cur = str;
int val;
EISA_DBG("IRQ setup\n");
while (cur != NULL) {
char *pe;
val = (int) simple_strtoul(cur, &pe, 0);
if (val < 0 || val > 15) {
/* note: don't just "continue" here -- cur has not been
* advanced yet, so that would parse the same value forever */
printk(KERN_ERR "eisa: EISA irq values are 0-15\n");
} else {
if (val == 2) {
val = 9;
}
eisa_make_irq_edge(val); /* clear the corresponding bit */
EISA_DBG("setting IRQ %d to edge-triggered mode\n", val);
}
if ((cur = strchr(cur, ','))) {
cur++;
} else {
break;
}
}
return 1;
}
__setup("eisa_irq_edge=", eisa_irq_setup);
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/eisa_eeprom.h>
#define EISA_EEPROM_MINOR 241
static unsigned long eeprom_addr;
static long long eisa_eeprom_llseek(struct file *file, loff_t offset, int origin )
{
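/* origin follows the classic lseek whence values:
* 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END (end == HPEE_MAX_LENGTH) */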
switch (origin) {
case 0:
/* nothing to do */
break;
case 1:
offset += file->f_pos;
break;
case 2:
offset += HPEE_MAX_LENGTH;
break;
}
return (offset >= 0 && offset < HPEE_MAX_LENGTH) ? (file->f_pos = offset) : -EINVAL;
}
static ssize_t eisa_eeprom_read(struct file * file,
char *buf, size_t count, loff_t *ppos )
{
unsigned char *tmp;
ssize_t ret;
int i;
if (*ppos >= HPEE_MAX_LENGTH)
return 0;
count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;
tmp = kmalloc(count, GFP_KERNEL);
if (tmp) {
for (i = 0; i < count; i++)
tmp[i] = gsc_readb(eeprom_addr+(*ppos)++);
if (copy_to_user (buf, tmp, count))
ret = -EFAULT;
else
ret = count;
kfree (tmp);
} else
ret = -ENOMEM;
return ret;
}
static int eisa_eeprom_ioctl(struct inode *inode, struct file *file,
unsigned int cmd,
unsigned long arg)
{
return -ENOTTY;
}
static int eisa_eeprom_open(struct inode *inode, struct file *file)
{
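/* (f_mode & 2) tests FMODE_WRITE: the EEPROM is read-only, so refuse
* writers, and refuse everyone if no EEPROM address was registered */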
if (file->f_mode & 2 || eeprom_addr == 0)
return -EINVAL;
return 0;
}
static int eisa_eeprom_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* The various file operations we support.
*/
static struct file_operations eisa_eeprom_fops = {
owner: THIS_MODULE,
llseek: eisa_eeprom_llseek,
read: eisa_eeprom_read,
ioctl: eisa_eeprom_ioctl,
open: eisa_eeprom_open,
release: eisa_eeprom_release,
};
static struct miscdevice eisa_eeprom_dev=
{
EISA_EEPROM_MINOR,
"eisa eeprom",
&eisa_eeprom_fops
};
int __init eisa_eeprom_init(unsigned long addr)
{
if (addr) {
eeprom_addr = addr;
misc_register(&eisa_eeprom_dev);
printk(KERN_INFO "EISA EEPROM at 0x%lx\n", eeprom_addr);
}
return 0;
}
MODULE_LICENSE("GPL");
/*
* eisa_enumerator.c - provide support for EISA adapters in PA-RISC machines
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
*
*/
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <asm/eisa_bus.h>
#include <asm/eisa_eeprom.h>
/*
* Todo:
*
* PORT init with MASK attr and other size than byte
* MEMORY with other decode than 20 bit
* CRC stuff
* FREEFORM stuff
*/
#define EPI 0xc80
#define NUM_SLOT 16
#define SLOT2PORT(x) (x<<12)
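/* Each EISA slot decodes its own 4k I/O range at (slot << 12); the
* board's ID registers sit at offset 0xc80 (EPI) within that range,
* per the EISA specification. */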
/* macros to handle unaligned accesses and
* byte swapping. The data in the EEPROM is
* little-endian on the big-endian PA-RISC. */
#define get_8(x) (*(u_int8_t*)(x))
static inline u_int16_t get_16(const unsigned char *x)
{
return (x[1] << 8) | x[0];
}
static inline u_int32_t get_32(const unsigned char *x)
{
return (x[3] << 24) | (x[2] << 16) | (x[1] << 8) | x[0];
}
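/* Note that get_24 returns the 24-bit little-endian value scaled by
* 256: EISA memory descriptors store addresses in units of 0x100
* bytes, so shifting left by 8 yields a byte address directly (see
* configure_memory below). */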
static inline u_int32_t get_24(const unsigned char *x)
{
return (x[2] << 24) | (x[1] << 16) | (x[0] << 8);
}
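/* Decode a compressed EISA ID: three 5-bit vendor letters ('A' == 1)
* in the top bits, then an 8-bit product number and an 8-bit revision.
* Worked example: 0x04431201 unpacks to vendor "ABC", product 0x12,
* rev 0x01, and prints as "ABC1201". */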
static void print_eisa_id(char *s, u_int32_t id)
{
char vendor[4];
int rev;
int device;
rev = id & 0xff;
id >>= 8;
device = id & 0xff;
id >>= 8;
vendor[3] = '\0';
vendor[2] = '@' + (id & 0x1f);
id >>= 5;
vendor[1] = '@' + (id & 0x1f);
id >>= 5;
vendor[0] = '@' + (id & 0x1f);
id >>= 5;
sprintf(s, "%s%02X%02X", vendor, device, rev);
}
static int configure_memory(const unsigned char *buf,
struct resource *mem_parent,
char *name)
{
int len;
u_int8_t c;
int i;
struct resource *res;
len=0;
for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) {
c = get_8(buf+len);
if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
int result;
res->name = name;
res->start = mem_parent->start + get_24(buf+len+2);
res->end = res->start + get_16(buf+len+5)*1024;
res->flags = IORESOURCE_MEM;
printk("memory %lx-%lx ", res->start, res->end);
result = request_resource(mem_parent, res);
if (result < 0) {
printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
return result;
}
}
len+=7;
if (!(c & HPEE_MEMORY_MORE)) {
break;
}
}
return len;
}
static int configure_irq(const unsigned char *buf)
{
int len;
u_int8_t c;
int i;
len=0;
for (i=0;i<HPEE_IRQ_MAX_ENT;i++) {
c = get_8(buf+len);
printk("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK);
if (c & HPEE_IRQ_TRIG_LEVEL) {
eisa_make_irq_level(c & HPEE_IRQ_CHANNEL_MASK);
} else {
eisa_make_irq_edge(c & HPEE_IRQ_CHANNEL_MASK);
}
len+=2;
/* hpux seems to allow for
* two bytes of irq data but only defines one of
* them, I think */
if (!(c & HPEE_IRQ_MORE)) {
break;
}
}
return len;
}
static int configure_dma(const unsigned char *buf)
{
int len;
u_int8_t c;
int i;
len=0;
for (i=0;i<HPEE_DMA_MAX_ENT;i++) {
c = get_8(buf+len);
printk("DMA %d ", c&HPEE_DMA_CHANNEL_MASK);
/* fixme: maybe initialize the dma channel with the timing? */
len+=2;
if (!(c & HPEE_DMA_MORE)) {
break;
}
}
return len;
}
static int configure_port(const unsigned char *buf, struct resource *io_parent,
char *board)
{
int len;
u_int8_t c;
int i;
struct resource *res;
int result;
len=0;
for (i=0;i<HPEE_PORT_MAX_ENT;i++) {
c = get_8(buf+len);
if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
res->name = board;
res->start = get_16(buf+len+1);
res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
res->flags = IORESOURCE_IO;
printk("ioports %lx-%lx ", res->start, res->end);
result = request_resource(io_parent, res);
if (result < 0) {
printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
return result;
}
}
len+=3;
if (!(c & HPEE_PORT_MORE)) {
break;
}
}
return len;
}
/* Bytes 1 and 2 are the port number to write
* to, and the value to write starts at byte 3.
* I assume that there are AND and OR masks
* here when HPEE_PORT_INIT_MASK is set, but I have
* not yet encountered this. */
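/* The rest of the layout, as inferred from the parsing code below:
* bits 0-1 of the flag byte give the access width (1/2/4 bytes),
* HPEE_PORT_INIT_MORE chains another record, and each record is
* 3 + width bytes long (with width doubled when a mask is present). */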
static int configure_port_init(const unsigned char *buf)
{
int len=0;
u_int8_t c;
while (len<HPEE_PORT_INIT_MAX_LEN) {
int s=0;
c = get_8(buf+len);
switch (c & HPEE_PORT_INIT_WIDTH_MASK) {
case HPEE_PORT_INIT_WIDTH_BYTE:
s=1;
if (c & HPEE_PORT_INIT_MASK) {
printk("\n" KERN_WARNING "port_init: unverified mask attribute\n");
outb((inb(get_16(buf+len+1)) &
get_8(buf+len+3)) |
get_8(buf+len+4), get_16(buf+len+1));
} else {
outb(get_8(buf+len+3), get_16(buf+len+1));
}
break;
case HPEE_PORT_INIT_WIDTH_WORD:
s=2;
if (c & HPEE_PORT_INIT_MASK) {
printk(KERN_WARNING "port_init: unverified mask attribute\n");
outw((inw(get_16(buf+len+1)) &
get_16(buf+len+3)) |
get_16(buf+len+5),
get_16(buf+len+1));
} else {
outw(cpu_to_le16(get_16(buf+len+3)), get_16(buf+len+1));
}
break;
case HPEE_PORT_INIT_WIDTH_DWORD:
s=4;
if (c & HPEE_PORT_INIT_MASK) {
printk("\n" KERN_WARNING "port_init: unverified mask attribute\n");
outl((inl(get_16(buf+len+1)) &
get_32(buf+len+3)) |
get_32(buf+len+7), get_16(buf+len+1));
} else {
outl(cpu_to_le32(get_32(buf+len+3)), get_16(buf+len+1));
}
break;
default:
printk("\n" KERN_ERR "Invalid port init word %02x\n", c);
return 0;
}
if (c & HPEE_PORT_INIT_MASK) {
s*=2;
}
len+=s+3;
if (!(c & HPEE_PORT_INIT_MORE)) {
break;
}
}
return len;
}
static int configure_choise(const unsigned char *buf, u_int8_t *info)
{
int len;
/* this record contains the values of the function's
* configuration choices and an info byte which
* describes which other records to expect in this
* function */
len = get_8(buf);
*info=get_8(buf+len+1);
return len+2;
}
static int configure_type_string(const unsigned char *buf)
{
int len;
/* just skip past the type field */
len = get_8(buf);
if (len > 80) {
printk("\n" KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len);
}
return 1+len;
}
static int configure_function(const unsigned char *buf, int *more)
{
/* the init field seems to be a two-byte field
* which is non-zero if there is another function following;
* I think it is the length of the function definition
*/
*more = get_16(buf);
return 2;
}
static int parse_slot_config(int slot,
const unsigned char *buf,
struct eeprom_eisa_slot_info *es,
struct resource *io_parent,
struct resource *mem_parent)
{
int res=0;
int function_len;
unsigned int pos=0;
unsigned int maxlen;
int num_func=0;
u_int8_t flags;
int p0;
char *board;
int id_string_used=0;
if (NULL == (board = kmalloc(8, GFP_KERNEL))) {
return -1;
}
print_eisa_id(board, es->eisa_slot_id);
printk(KERN_INFO "EISA slot %d: %s %s ",
slot, board, es->flags&HPEE_FLAG_BOARD_IS_ISA ? "ISA" : "EISA");
maxlen = es->config_data_length < HPEE_MAX_LENGTH ?
es->config_data_length : HPEE_MAX_LENGTH;
while ((pos < maxlen) && (num_func <= es->num_functions)) {
pos+=configure_function(buf+pos, &function_len);
if (!function_len) {
break;
}
num_func++;
p0 = pos;
pos += configure_choise(buf+pos, &flags);
if (flags & HPEE_FUNCTION_INFO_F_DISABLED) {
/* function disabled, skip silently */
pos = p0 + function_len;
continue;
}
if (flags & HPEE_FUNCTION_INFO_CFG_FREE_FORM) {
/* I have no idea how to handle this */
printk("function %d have free-form confgiuration, skipping ",
num_func);
pos = p0 + function_len;
continue;
}
/* the ordering of the sections needs
* more investigation.
* Currently I think that memory comes before IRQ,
* and I assume the order is LSB to MSB in the
* info flags,
* eg type, memory, irq, dma, port, HPEE_PORT_INIT
*/
if (flags & HPEE_FUNCTION_INFO_HAVE_TYPE) {
pos += configure_type_string(buf+pos);
}
if (flags & HPEE_FUNCTION_INFO_HAVE_MEMORY) {
id_string_used=1;
pos += configure_memory(buf+pos, mem_parent, board);
}
if (flags & HPEE_FUNCTION_INFO_HAVE_IRQ) {
pos += configure_irq(buf+pos);
}
if (flags & HPEE_FUNCTION_INFO_HAVE_DMA) {
pos += configure_dma(buf+pos);
}
if (flags & HPEE_FUNCTION_INFO_HAVE_PORT) {
id_string_used=1;
pos += configure_port(buf+pos, io_parent, board);
}
if (flags & HPEE_FUNCTION_INFO_HAVE_PORT_INIT) {
pos += configure_port_init(buf+pos);
}
if (p0 + function_len < pos) {
printk("\n" KERN_ERR "eisa_enumerator: function %d length mis-match "
"got %d, expected %d\n",
num_func, pos-p0, function_len);
res=-1;
break;
}
pos = p0 + function_len;
}
printk("\n");
if (!id_string_used) {
kfree(board);
}
if (pos != es->config_data_length) {
printk(KERN_ERR "eisa_enumerator: config data length mis-match got %d, expected %d\n",
pos, es->config_data_length);
res=-1;
}
if (num_func != es->num_functions) {
printk(KERN_ERR "eisa_enumerator: number of functions mis-match got %d, expected %d\n",
num_func, es->num_functions);
res=-2;
}
return res;
}
static int init_slot(int slot, struct eeprom_eisa_slot_info *es)
{
unsigned int id;
char id_string[8];
if (!(es->slot_info&HPEE_SLOT_INFO_NO_READID)) {
/* try to read the id of the board in the slot */
id = le32_to_cpu(inl(SLOT2PORT(slot)+EPI));
if (0xffffffff == id) {
/* this board is not here or it does not
* support readid
*/
printk(KERN_ERR "EISA slot %d a configured board was not detected (",
slot);
print_eisa_id(id_string, es->eisa_slot_id);
printk(" expected %s)\n", id_string);
return -1;
}
if (es->eisa_slot_id != id) {
print_eisa_id(id_string, id);
printk(KERN_ERR "EISA slot %d id mis-match: got %s",
slot, id_string);
print_eisa_id(id_string, es->eisa_slot_id);
printk(" expected %s \n", id_string);
return -1;
}
}
/* now: we need to enable the board if
* it supports enabling, run through
* the port init section if present,
* and finally record any interrupt polarity
*/
if (es->slot_features & HPEE_SLOT_FEATURES_ENABLE) {
/* enable board */
outb(0x01| inb(SLOT2PORT(slot)+EPI+4),
SLOT2PORT(slot)+EPI+4);
}
return 0;
}
int eisa_enumerator(unsigned long eeprom_addr,
struct resource *io_parent, struct resource *mem_parent)
{
int i;
struct eeprom_header *eh;
static char eeprom_buf[HPEE_MAX_LENGTH];
for (i=0; i < HPEE_MAX_LENGTH; i++) {
eeprom_buf[i] = gsc_readb(eeprom_addr+i);
}
printk(KERN_INFO "Enumerating EISA bus\n");
eh = (struct eeprom_header*)(eeprom_buf);
for (i=0;i<eh->num_slots;i++) {
struct eeprom_eisa_slot_info *es;
es = (struct eeprom_eisa_slot_info*)
(&eeprom_buf[HPEE_SLOT_INFO(i)]);
if (-1==init_slot(i+1, es)) {
return -1;
}
if (es->config_data_offset < HPEE_MAX_LENGTH) {
if (parse_slot_config(i+1, &eeprom_buf[es->config_data_offset],
es, io_parent, mem_parent)) {
return -1;
}
} else {
printk (KERN_WARNING "EISA EEPROM offset 0x%x out of range\n",es->config_data_offset);
return -1;
}
}
return 0;
}
/*
* Interrupt management for most GSC and related devices.
*
* (c) Copyright 1999 Alex deVries for The Puffin Group
* (c) Copyright 1999 Grant Grundler for Hewlett-Packard
* (c) Copyright 1999 Matthew Wilcox
* (c) Copyright 2000 Helge Deller
* (c) Copyright 2001 Matthew Wilcox for Hewlett-Packard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "gsc.h"
#undef DEBUG
#ifdef DEBUG
#define DEBPRINTK printk
#else
#define DEBPRINTK(x,...)
#endif
int gsc_alloc_irq(struct gsc_irq *i)
{
int irq = txn_alloc_irq();
if (irq < 0) {
printk("cannot get irq\n");
return irq;
}
i->txn_addr = txn_alloc_addr(irq);
i->txn_data = txn_alloc_data(irq, GSC_EIM_WIDTH);
i->irq = irq;
return irq;
}
int gsc_claim_irq(struct gsc_irq *i, int irq)
{
int c = irq;
irq += IRQ_FROM_REGION(CPU_IRQ_REGION); /* virtualize the IRQ first */
irq = txn_claim_irq(irq);
if (irq < 0) {
printk("cannot claim irq %d\n", c);
return irq;
}
i->txn_addr = txn_alloc_addr(irq);
i->txn_data = txn_alloc_data(irq, GSC_EIM_WIDTH);
i->irq = irq;
return irq;
}
EXPORT_SYMBOL(gsc_alloc_irq);
EXPORT_SYMBOL(gsc_claim_irq);
/* IRQ bits must be numbered from Most Significant Bit */
#define GSC_FIX_IRQ(x) (31-(x))
#define GSC_MASK_IRQ(x) (1<<(GSC_FIX_IRQ(x)))
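/* e.g. GSC_MASK_IRQ(0) == 0x80000000 and GSC_MASK_IRQ(31) == 0x1 */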
/* Common interrupt demultiplexer used by Asp, Lasi & Wax. */
void busdev_barked(int busdev_irq, void *dev, struct pt_regs *regs)
{
unsigned long irq;
struct busdevice *busdev = (struct busdevice *) dev;
/*
Don't need to protect OFFSET_IRR with spinlock since this is
the only place it's touched.
Protect busdev_region by disabling this region's interrupts,
modifying the region, and then re-enabling the region.
*/
irq = gsc_readl(busdev->hpa+OFFSET_IRR);
if (irq == 0) {
printk(KERN_ERR "%s: barking without apparent reason.\n", busdev->name);
} else {
DEBPRINTK ("%s (0x%x) barked, mask=0x%x, irq=%d\n",
busdev->name, busdev->busdev_region->data.irqbase,
irq, GSC_FIX_IRQ(ffs(irq))+1 );
do_irq_mask(irq, busdev->busdev_region, regs);
}
}
static void
busdev_disable_irq(void *irq_dev, int irq)
{
/* Disable the IRQ line by clearing the bit in the IMR */
u32 imr = gsc_readl(BUSDEV_DEV(irq_dev)->hpa+OFFSET_IMR);
imr &= ~(GSC_MASK_IRQ(irq));
DEBPRINTK( KERN_WARNING "%s(%p, %d) %s: IMR 0x%x\n",
__FUNCTION__, irq_dev, irq, BUSDEV_DEV(irq_dev)->name, imr);
gsc_writel(imr, BUSDEV_DEV(irq_dev)->hpa+OFFSET_IMR);
}
static void
busdev_enable_irq(void *irq_dev, int irq)
{
/* Enable the IRQ line by setting the bit in the IMR */
unsigned long addr = BUSDEV_DEV(irq_dev)->hpa + OFFSET_IMR;
u32 imr = gsc_readl(addr);
imr |= GSC_MASK_IRQ(irq);
DEBPRINTK (KERN_WARNING "%s(%p, %d) %s: IMR 0x%x\n",
__FUNCTION__, irq_dev, irq, BUSDEV_DEV(irq_dev)->name, imr);
gsc_writel(imr, addr);
// gsc_writel(~0L, addr);
/* FIXME: read IPR to make sure the IRQ isn't already pending.
** If so, we need to read IRR and manually call do_irq_mask().
** This code should be shared with busdev_unmask_irq().
*/
}
static void
busdev_mask_irq(void *irq_dev, int irq)
{
/* FIXME: Clear the IMR bit in busdev for that IRQ */
}
static void
busdev_unmask_irq(void *irq_dev, int irq)
{
/* FIXME: Read IPR. Set the IMR bit in busdev for that IRQ.
call do_irq_mask() if IPR is non-zero
*/
}
struct irq_region_ops busdev_irq_ops = {
disable_irq: busdev_disable_irq,
enable_irq: busdev_enable_irq,
mask_irq: busdev_mask_irq,
unmask_irq: busdev_unmask_irq
};
int gsc_common_irqsetup(struct parisc_device *parent, struct busdevice *busdev)
{
struct resource *res;
busdev->gsc = parent;
/* the IRQs we simulate */
busdev->busdev_region = alloc_irq_region(32, &busdev_irq_ops,
busdev->name, busdev);
if (!busdev->busdev_region)
return -ENOMEM;
/* allocate resource region */
res = request_mem_region(busdev->hpa, 0x100000, busdev->name);
if (res) {
res->flags = IORESOURCE_MEM; /* do not mark it busy ! */
}
#if 0
printk(KERN_WARNING "%s IRQ %d EIM 0x%x", busdev->name,
busdev->parent_irq, busdev->eim);
if (gsc_readl(busdev->hpa + OFFSET_IMR))
printk(" IMR is non-zero! (0x%x)",
gsc_readl(busdev->hpa + OFFSET_IMR));
printk("\n");
#endif
return 0;
}
extern struct parisc_driver lasi_driver;
extern struct parisc_driver asp_driver;
extern struct parisc_driver wax_driver;
void __init gsc_init(void)
{
#ifdef CONFIG_GSC_LASI
register_parisc_driver(&lasi_driver);
register_parisc_driver(&asp_driver);
#endif
#ifdef CONFIG_GSC_WAX
register_parisc_driver(&wax_driver);
#endif
}
/*
* drivers/parisc/gsc.h
* Declarations for functions in gsc.c
* Copyright (c) 2000-2002 Helge Deller, Matthew Wilcox
*
* Distributed under the terms of the GPL, version 2
*/
#include <linux/interrupt.h>
#include <asm/hardware.h>
#define OFFSET_IRR 0x0000 /* Interrupt request register */
#define OFFSET_IMR 0x0004 /* Interrupt mask register */
#define OFFSET_IPR 0x0008 /* Interrupt pending register */
#define OFFSET_ICR 0x000C /* Interrupt control register */
#define OFFSET_IAR 0x0010 /* Interrupt address register */
/* PA I/O Architected devices support at least 5 bits in the EIM register. */
#define GSC_EIM_WIDTH 5
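/* The EIM value programmed into a bus adapter's IAR is the interrupt
* transaction target: a CPU HPA (txn_addr) OR'd with the EIR bit to
* set (txn_data) -- see gsc_alloc_irq() and lasi_init_chip(). */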
struct gsc_irq {
unsigned long txn_addr; /* IRQ "target" */
int txn_data; /* HW "IRQ" */
int irq; /* virtual IRQ */
};
struct busdevice {
struct parisc_device *gsc;
unsigned long hpa;
char *name;
int version;
int type;
int parent_irq;
int eim;
struct irq_region *busdev_region;
};
/* short cut to keep the compiler happy */
#define BUSDEV_DEV(x) ((struct busdevice *) (x))
int gsc_common_irqsetup(struct parisc_device *parent, struct busdevice *busdev);
extern int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
extern int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
void busdev_barked(int busdev_irq, void *dev, struct pt_regs *regs);
@@ -169,11 +169,13 @@
#include <asm/byteorder.h> /* get in-line asm for swab */
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/gsc.h> /* gsc_read/write functions */
#include <asm/io.h> /* gsc_read/write functions */
#ifdef CONFIG_SUPERIO
#include <asm/superio.h>
#endif
#include <asm/iosapic.h>
#include "./iosapic_private.h"
@@ -181,7 +183,6 @@
#define MODULE_NAME "iosapic"
/* "local" compile flags */
#undef IOSAPIC_CALLBACK
#undef PCI_BRIDGE_FUNCS
#undef DEBUG_IOSAPIC
#undef DEBUG_IOSAPIC_IRT
@@ -210,6 +211,7 @@ assert_failed (char *a, char *f, int l)
#else /* DEBUG_IOSAPIC */
#define DBG(x...)
#undef ASSERT
#define ASSERT(EX)
#endif /* DEBUG_IOSAPIC */
@@ -282,28 +284,6 @@ assert_failed (char *a, char *f, int l)
#define IOSAPIC_EOI(eoi_addr, eoi_data) gsc_writel(eoi_data, eoi_addr)
#if IOSAPIC_CALLBACK
/*
** Shouldn't use a callback since SAPIC doesn't have an officially assigned
** H or S version number. Slight long-term risk that the number chosen would
** collide with something else.
** But the benefit is a cleaner lba/sapic interface.
** Might be worth it, but just use direct calls for now.
**
** Entry below is copied from lba driver.
** Only thing different is hw_type.
*/
static struct pa_iodc_driver iosapic_driver_for[] = {
{HPHW_OTHER, 0x782, 0, 0x0000A, 0, 0x00,
DRIVER_CHECK_HWTYPE + DRIVER_CHECK_HVERSION + DRIVER_CHECK_SVERSION,
"I/O Sapic", "",(void *) iosapic_callback},
{0,0,0,0,0,0,
0,
(char *) NULL,(char *) NULL,(void *) NULL}
};
#endif /* IOSAPIC_CALLBACK */
static struct iosapic_info *iosapic_list;
static spinlock_t iosapic_lock;
static int iosapic_count;
@@ -350,25 +330,22 @@ static size_t irt_num_entry;
static int __init /* return number of entries as success/fail flag */
iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
{
struct pdc_pat_io_num pdc_io_num; /* PAT PDC return block */
long status; /* PDC return value status */
struct irt_entry *table = NULL; /* start of interrupt routing tbl */
struct irt_entry *table; /* start of interrupt routing tbl */
unsigned long num_entries = 0UL;
ASSERT(NULL != irt);
/* FIXME ASSERT(((&pdc_io_num) & (0x3f)) == 0); enforce 32-byte alignment */
/* Try PAT_PDC to get interrupt routing table size */
DBG(KERN_DEBUG "calling get_irt_size\n");
status = pdc_pat_get_irt_size( &pdc_io_num, cell_num);
DBG(KERN_DEBUG "get_irt_size: %ld\n", status);
if (is_pdc_pat()) {
switch(status) {
/* Use pat pdc routine to get interrupt routing table size */
DBG("calling get_irt_size (cell %ld)\n", cell_num);
status = pdc_pat_get_irt_size(&num_entries, cell_num);
DBG("get_irt_size: %ld\n", status);
case PDC_RET_OK: /* PAT box. Proceed to get the IRT */
ASSERT(status == PDC_OK);
/* save the number of entries in the table */
num_entries = pdc_io_num.num;
ASSERT(0UL != num_entries);
/*
@@ -384,30 +361,27 @@ iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
}
/* get PCI INT routing table */
status = pdc_pat_get_irt( (void *) table, cell_num);
DBG(KERN_DEBUG "pdc_pat_get_irt: %ld\n", status);
ASSERT(status == PDC_RET_OK);
break;
case PDC_RET_NE_PROC: /* Not a PAT platform. Try PDC_PCI extensions */
status = pdc_pat_get_irt(table, cell_num);
DBG("pdc_pat_get_irt: %ld\n", status);
ASSERT(status == PDC_OK);
} else {
/*
** C3000/J5000 (and similar) platforms with "legacy" PDC
** will return exactly one IRT.
** C3000/J5000 (and similar) platforms with Sprockets PDC
** will return exactly one IRT for all iosapics.
** So if we have one, don't need to get it again.
*/
if (NULL != irt_cell)
break;
return 0;
status = pdc_pci_irt_size( (void *)&pdc_io_num,
/* elroy HPA (really a NOP) */ 0);
DBG(KERN_WARNING "pdc_pci_irt_size: %ld\n", status);
/* Should be using the Elroy's HPA, but it's ignored anyway */
status = pdc_pci_irt_size(&num_entries, 0);
DBG("pdc_pci_irt_size: %ld\n", status);
if (PDC_RET_OK != status) {
if (PDC_OK != status) {
/* Not a "legacy" system with I/O SAPIC either */
return 0;
}
num_entries = pdc_io_num.num;
ASSERT(0UL != num_entries);
table = IOSAPIC_KALLOC(struct irt_entry, num_entries);
@@ -416,36 +390,27 @@ iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
return 0;
}
status = pdc_pci_irt( (void *) &pdc_io_num,
(void *) NULL, /* Elroy HPA - not used */
(void *) table);
ASSERT(PDC_RET_OK == status);
break;
default:
printk(KERN_WARNING MODULE_NAME ": PDC_PAT_IO call failed with %ld\n", status);
break;
/* HPA ignored by this call too. */
status = pdc_pci_irt(num_entries, 0, table);
ASSERT(PDC_OK == status);
}
/* return interrupt table address */
*irt = table;
#ifdef DEBUG_IOSAPIC_IRT
{
{
struct irt_entry *p = table;
int i;
printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
printk(KERN_DEBUG MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
printk(KERN_DEBUG MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
table,
num_entries,
(int) sizeof(struct irt_entry));
for (i = 0 ; i < num_entries ; i++, p++)
{
printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
for (i = 0 ; i < num_entries ; i++, p++) {
printk(KERN_DEBUG MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
p->entry_type, p->entry_length, p->interrupt_type,
p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id,
p->src_seg_id, p->dest_iosapic_intin,
@@ -453,7 +418,7 @@ iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
((u32 *) p)[3]
);
}
}
}
#endif /* DEBUG_IOSAPIC_IRT */
return num_entries;
@@ -464,6 +429,8 @@ iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
void __init
iosapic_init(void)
{
unsigned long cell = 0;
/* init global data */
iosapic_lock = SPIN_LOCK_UNLOCKED;
iosapic_list = (struct iosapic_info *) NULL;
@@ -471,22 +438,24 @@ iosapic_init(void)
DBG("iosapic_init()\n");
#ifdef __LP64__
if (is_pdc_pat()) {
int status;
struct pdc_pat_cell_num cell_info;
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
cell = cell_info.cell_num;
}
}
#endif
/*
** get IRT for this cell.
*/
irt_num_entry = iosapic_load_irt(0L, &irt_cell);
irt_num_entry = iosapic_load_irt(cell, &irt_cell);
if (0 == irt_num_entry)
irt_cell = NULL; /* old PDC w/o iosapic */
#ifdef IOSAPIC_CALLBACK
/*
** When new I/O SAPICs are discovered, this callback
** will get invoked. Implies lba driver will register
** I/O Sapic as a device it "discovered" with faked
** IODC data.
*/
register_driver(iosapic_driver_for);
#endif /* IOSAPIC_CALLBACK */
}
@@ -546,7 +515,7 @@ irt_find_irqline(struct iosapic_info *isi, u8 slot, u8 intr_pin)
return i;
}
printk(KERN_WARNING MODULE_NAME ": 0x%p : no IRT entry for slot %d, pin %d\n",
printk(KERN_WARNING MODULE_NAME ": 0x%lx : no IRT entry for slot %d, pin %d\n",
isi->isi_hpa, slot, intr_pin);
return NULL;
}
@@ -571,21 +540,18 @@ iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev)
{
u8 intr_pin, intr_slot;
(void) pci_read_config_byte(pcidev, PCI_INTERRUPT_PIN, &intr_pin);
pci_read_config_byte(pcidev, PCI_INTERRUPT_PIN, &intr_pin);
DBG_IRT("iosapic_xlate_pin() SLOT %d pin %d\n", PCI_SLOT(pcidev->devfn), intr_pin);
DBG_IRT("iosapic_xlate_pin() SLOT %d pin %d\n",
PCI_SLOT(pcidev->devfn), intr_pin);
if (0 == intr_pin)
{
/*
** The device does NOT support/use IRQ lines.
*/
if (0 == intr_pin) {
/* The device does NOT support/use IRQ lines. */
return NULL;
}
/* Check if pcidev behind a PPB */
if (NULL != pcidev->bus->self)
{
if (NULL != pcidev->bus->self) {
/* Convert pcidev INTR_PIN into something we
** can lookup in the IRT.
*/
@@ -600,7 +566,7 @@ iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev)
** or by some ambitious soul who wants to watch TV.
*/
if (pci_bridge_funcs->xlate_intr_line) {
intr_pin = (*pci_bridge_funcs->xlate_intr_line)(pcidev);
intr_pin = pci_bridge_funcs->xlate_intr_line(pcidev);
}
#else /* PCI_BRIDGE_FUNCS */
struct pci_bus *p = pcidev->bus;
@@ -646,8 +612,8 @@ iosapic_interrupt(int irq, void *dev_id, struct pt_regs * regs)
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
int irq_num = vi->vi_ios->isi_region->data.irqbase + vi->vi_irqline;
DBG("iosapic_interrupt(): irq %d line %d eoi %p\n", irq, vi->vi_irqline,
vi->vi_eoi_addr);
DBG("iosapic_interrupt(): irq %d line %d eoi %p\n",
irq, vi->vi_irqline, vi->vi_eoi_addr);
/* FIXME: Need to mask/unmask? processor IRQ is already masked... */
do_irq(&vi->vi_ios->isi_region->action[vi->vi_irqline], irq_num, regs);
@@ -668,11 +634,38 @@ iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
struct vector_info *vi;
int isi_line; /* line used by device */
int tmp;
int return_irq;
#ifdef CONFIG_SUPERIO
int superio_irq = -1;
#endif
if (NULL == isi) {
printk(KERN_WARNING MODULE_NAME ": 0x%p hpa not registered\n", isi->isi_hpa);
printk(KERN_WARNING MODULE_NAME ": hpa not registered for %s\n",
pcidev->name);
return(-1);
}
#ifdef CONFIG_SUPERIO
if (is_superio_device(pcidev)) {
superio_irq = superio_fixup_irq(pcidev);
if (superio_irq == -1)
return(-1);
if (PCI_FUNC(pcidev->devfn) != SUPERIO_USB_FN) {
/*
* SuperIO USB controller has an irt entry.
* Only let the USB controller hookup the rest
* of the interrupt routing when it comes through.
* Note that interrupts for all three functions
* actually come through the PIC's on function 1!
*/
pcidev->irq = superio_irq;
return superio_irq;
}
}
#endif /* CONFIG_SUPERIO */
/* lookup IRT entry for isi/slot/pin set */
irte = iosapic_xlate_pin(isi, pcidev);
@@ -714,22 +707,31 @@ iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
vi->vi_txn_data = txn_alloc_data(vi->vi_txn_irq, 8);
ASSERT(vi->vi_txn_data < 256); /* matches 8 above */
tmp = request_irq(vi->vi_txn_irq, iosapic_interrupt, 0, "iosapic", vi);
tmp = request_irq(vi->vi_txn_irq, iosapic_interrupt, 0,
vi->vi_name, vi);
ASSERT(tmp == 0);
vi->vi_eoi_addr = ((void *) isi->isi_hpa) + IOSAPIC_REG_EOI;
vi->vi_eoi_addr = (u32 *) (isi->isi_hpa + IOSAPIC_REG_EOI);
vi->vi_eoi_data = cpu_to_le32(vi->vi_irqline);
ASSERT(NULL != isi->isi_region);
/*
** pcidev->irq still needs to be virtualized.
*/
pcidev->irq = isi->isi_region->data.irqbase + isi_line;
/* pcidev->irq still needs to be virtualized. */
DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n", PCI_SLOT(pcidev->devfn),
PCI_FUNC(pcidev->devfn), pcidev->vendor, pcidev->device, isi_line, pcidev->irq);
return_irq = isi->isi_region->data.irqbase + isi_line;
return(pcidev->irq);
#ifdef CONFIG_SUPERIO
if (superio_irq != -1) {
superio_inform_irq(return_irq);
return_irq = superio_irq;
}
#endif
pcidev->irq = return_irq;
DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n",
PCI_SLOT(pcidev->devfn),
PCI_FUNC(pcidev->devfn), pcidev->vendor, pcidev->device, isi_line, return_irq);
return return_irq;
}
@@ -755,7 +757,7 @@ iosapic_wr_irt_entry(struct vector_info *vi, u32 dp0, u32 dp1)
struct iosapic_info *isp = vi->vi_ios;
ASSERT(NULL != isp);
ASSERT(NULL != isp->isi_hpa);
ASSERT(0 != isp->isi_hpa);
DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %p WINDOW %p 0x%x 0x%x\n",
vi->vi_irqline,
isp->isi_hpa, isp->isi_hpa+IOSAPIC_REG_WINDOW,
@@ -807,16 +809,13 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
** Extracting id_eid isn't a real clean way of getting it.
** But the encoding is the same for both PA and IA64 platforms.
*/
#ifdef __LP64__
if (pdc_pat) {
if (is_pdc_pat()) {
/*
** PAT PDC just hands it to us "right".
** vi_txn_addr comes from cpu_data[x].txn_addr.
*/
*dp1 = (u32) (vi->vi_txn_addr);
} else
#endif
{
} else {
/*
** eg if base_addr == 0xfffa0000),
** we want to get 0xa0ff0000.
@@ -886,7 +885,6 @@ iosapic_enable_irq(void *dev, int irq)
/* data is initialized by fixup_irq */
ASSERT(0 < vi->vi_txn_irq);
ASSERT(0UL != vi->vi_txn_addr);
ASSERT(0UL != vi->vi_txn_data);
iosapic_set_irt_data(vi, &d0, &d1);
@@ -895,10 +893,10 @@ iosapic_enable_irq(void *dev, int irq)
#ifdef DEBUG_IOSAPIC_IRT
{
u32 *t = (u32 *) ((ulong) vi->vi_eoi_addr & ~0xffUL);
printk("iosapic_enable_irq(): regs %p", vi->vi_eoi_addr);
while (t < vi->vi_eoi_addr) printk(" %x", READ_U32(t++));
printk("\n");
u32 *t = (u32 *) ((ulong) vi->vi_eoi_addr & ~0xffUL);
printk("iosapic_enable_irq(): regs %p", vi->vi_eoi_addr);
while (t < vi->vi_eoi_addr) printk(" %x", READ_U32(t++));
printk("\n");
}
printk("iosapic_enable_irq(): sel ");
@@ -943,10 +941,10 @@ iosapic_unmask_irq(void *dev, int irq)
static struct irq_region_ops iosapic_irq_ops = {
iosapic_disable_irq,
iosapic_enable_irq,
iosapic_mask_irq,
iosapic_unmask_irq
disable_irq: iosapic_disable_irq,
enable_irq: iosapic_enable_irq,
mask_irq: iosapic_mask_irq,
unmask_irq: iosapic_unmask_irq
};
@@ -967,15 +965,9 @@ iosapic_rd_version(struct iosapic_info *isi)
}
#ifndef IOSAPIC_CALLBACK
/*
** iosapic_register() is the alternative to iosapic_driver_for().
** (Only one or the other should be implemented.)
*/
/*
** iosapic_register() is called by "drivers" with an integrated I/O SAPIC.
** Caller must be certain they have an I/O SAPIC and know it's MMIO address.
** Caller must be certain they have an I/O SAPIC and know its MMIO address.
**
** o allocate iosapic_info and add it to the list
** o read iosapic version and squirrel that away
@@ -984,7 +976,7 @@ iosapic_rd_version(struct iosapic_info *isi)
** o allocate isi_region (registers region handlers)
*/
void *
iosapic_register(void *hpa)
iosapic_register(unsigned long hpa)
{
struct iosapic_info *isi = NULL;
struct irt_entry *irte = irt_cell;
@@ -1021,7 +1013,7 @@ iosapic_register(void *hpa)
memset(isi, 0, sizeof(struct iosapic_info));
isi->isi_hpa = (unsigned char *) hpa;
isi->isi_hpa = hpa;
isi->isi_version = iosapic_rd_version(isi);
isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
@@ -1034,6 +1026,7 @@
}
memset(vip, 0, sizeof(struct vector_info) * isi->isi_num_vectors);
sprintf(isi->isi_name, "IO-SAPIC%02d", iosapic_count++);
/*
** Initialize vector array
@@ -1041,17 +1034,16 @@
for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) {
vip->vi_irqline = (unsigned char) cnt;
vip->vi_ios = isi;
sprintf(vip->vi_name, "%s-L%d", isi->isi_name, cnt);
}
isi->isi_region = alloc_irq_region(isi->isi_num_vectors,
&iosapic_irq_ops, IRQ_REG_DIS|IRQ_REG_MASK,
"I/O Sapic", (void *) isi->isi_vector);
&iosapic_irq_ops, isi->isi_name,
(void *) isi->isi_vector);
ASSERT(NULL != isi->isi_region);
return ((void *) isi);
}
#endif /* !IOSAPIC_CALLBACK */
#ifdef DEBUG_IOSAPIC
@@ -1092,8 +1084,8 @@ iosapic_prt_isi(struct iosapic_info *isi)
{
ASSERT(NULL != isi);
printk(KERN_DEBUG MODULE_NAME ": io_sapic_info at %p\n", isi);
printk(KERN_DEBUG "\t\tisi_hpa: %p\n", isi->isi_hpa);
printk(KERN_DEBUG "\t\tisi_satus: %x\n", isi->isi_status);
printk(KERN_DEBUG "\t\tisi_hpa: %lx\n", isi->isi_hpa);
printk(KERN_DEBUG "\t\tisi_status: %x\n", isi->isi_status);
printk(KERN_DEBUG "\t\tisi_version: %x\n", isi->isi_version);
printk(KERN_DEBUG "\t\tisi_vector: %p\n", isi->isi_vector);
}
@@ -111,22 +111,25 @@ struct vector_info {
struct irt_entry *vi_irte; /* IRT entry */
u32 *vi_eoi_addr; /* precalculate EOI reg address */
u32 vi_eoi_data; /* IA64: ? PA: swapped txn_data */
u8 vi_status; /* status/flags */
u8 vi_irqline; /* INTINn(IRQ) */
int vi_txn_irq; /* virtual IRQ number for processor */
ulong vi_txn_addr; /* IA64: id_eid PA: partial HPA */
ulong vi_txn_data; /* IA64: vector PA: EIR bit */
u8 vi_status; /* status/flags */
u8 vi_irqline; /* INTINn(IRQ) */
char vi_name[32]; /* user visible identity */
};
struct iosapic_info {
struct iosapic_info *isi_next; /* list of I/O SAPIC */
volatile void *isi_hpa; /* physical base address */
unsigned long isi_hpa; /* physical base address */
struct irq_region *isi_region; /* each I/O SAPIC is one region */
struct vector_info *isi_vector; /* IRdT (IRQ line) array */
int isi_num_vectors; /* size of IRdT array */
int isi_status; /* status/flags */
unsigned int isi_version; /* DEBUG: data fr version reg */
/* round up to next cacheline */
char isi_name[20]; /* identify region for users */
};
/*
* LASI Device Driver
*
* (c) Copyright 1999 Red Hat Software
* Portions (c) Copyright 1999 The Puffin Group Inc.
* Portions (c) Copyright 1999 Hewlett-Packard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* by Alan Cox <alan@redhat.com> and
* Alex deVries <adevries@thepuffingroup.com>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/led.h>
#include "gsc.h"
#define LASI_VER 0xC008 /* LASI Version */
#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */
#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */
static int lasi_choose_irq(struct parisc_device *dev)
{
int irq;
/*
** "irq" bits below are numbered relative to most significant bit.
*/
switch (dev->id.sversion) {
case 0x74: irq = 24; break; /* Centronics */
case 0x7B: irq = 18; break; /* Audio */
case 0x81: irq = 17; break; /* Lasi itself */
case 0x82: irq = 22; break; /* SCSI */
case 0x83: irq = 11; break; /* Floppy */
case 0x84: irq = 5; break; /* PS/2 Keyboard */
case 0x87: irq = 13; break; /* ISDN */
case 0x8A: irq = 23; break; /* LAN */
case 0x8C: irq = 26; break; /* RS232 */
case 0x8D: irq = (dev->hw_path == 13) ? 15 : 14;
break; /* Telephone */
default: irq = -1; break; /* unknown */
}
return irq;
}
static void __init
lasi_init_irq(struct busdevice *this_lasi)
{
unsigned long lasi_base = this_lasi->hpa;
/* Stop LASI barking for a bit */
gsc_writel(0x00000000, lasi_base+OFFSET_IMR);
/* clear pending interrupts */
gsc_readl(lasi_base+OFFSET_IRR);
/* We're not really convinced we want to reset the onboard
* devices. Firmware does it for us...
*/
/* Resets */
/* gsc_writel(0xFFFFFFFF, lasi_base+0x2000);*/ /* Parallel */
gsc_writel(0xFFFFFFFF, lasi_base+0x4004); /* Audio */
/* gsc_writel(0xFFFFFFFF, lasi_base+0x5000);*/ /* Serial */
/* gsc_writel(0xFFFFFFFF, lasi_base+0x6000);*/ /* SCSI */
gsc_writel(0xFFFFFFFF, lasi_base+0x7000); /* LAN */
gsc_writel(0xFFFFFFFF, lasi_base+0x8000); /* Keyboard */
gsc_writel(0xFFFFFFFF, lasi_base+0xA000); /* FDC */
/* Ok we hit it on the head with a hammer, our Dog is now
** comatose and muzzled. Devices will now unmask LASI
** interrupts as they are registered as irq's in the LASI range.
*/
/* XXX: I thought it was `awks that got `it on the `ead with an
* `ammer. -- willy
*/
}
/*
** lasi_led_init()
**
** lasi_led_init() initializes the LED controller on the LASI.
**
** Since Mirage and Electra machines use a different LED
** address register, we need to check for these machines
** explicitly.
*/
#ifndef CONFIG_CHASSIS_LCD_LED
#define lasi_led_init(x) /* nothing */
#else
void __init lasi_led_init(unsigned long lasi_hpa)
{
unsigned long datareg;
switch (CPU_HVERSION) {
/* Gecko machines have only a single LED, which can be permanently
turned on by writing a zero into the power control register. */
case 0x600: /* Gecko (712/60) */
case 0x601: /* Gecko (712/80) */
case 0x602: /* Gecko (712/100) */
case 0x603: /* Anole 64 (743/64) */
case 0x604: /* Anole 100 (743/100) */
case 0x605: /* Gecko (712/120) */
datareg = lasi_hpa + 0x0000C000;
gsc_writeb(0, datareg);
return; /* no need to register the LED interrupt-function */
/* Mirage and Electra machines need special offsets */
case 0x60A: /* Mirage Jr (715/64) */
case 0x60B: /* Mirage 100 */
case 0x60C: /* Mirage 100+ */
case 0x60D: /* Electra 100 */
case 0x60E: /* Electra 120 */
datareg = lasi_hpa - 0x00020000;
break;
default:
datareg = lasi_hpa + 0x0000C000;
break;
} /* switch() */
register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, (char *)datareg);
}
#endif
/*
* lasi_power_off
*
* Function for lasi to turn off the power. This is accomplished by writing a
* 1 to PWR_ON_L in the Power Control Register.
*
*/
static unsigned long lasi_power_off_hpa;
static void lasi_power_off(void)
{
unsigned long datareg;
/* calculate addr of the Power Control Register */
datareg = lasi_power_off_hpa + 0x0000C000;
/* Power down the machine */
gsc_writel(0x02, datareg);
}
int __init
lasi_init_chip(struct parisc_device *dev)
{
struct busdevice *lasi;
struct gsc_irq gsc_irq;
int irq, ret;
lasi = kmalloc(sizeof(struct busdevice), GFP_KERNEL);
if (!lasi)
return -ENOMEM;
lasi->name = "Lasi";
lasi->hpa = dev->hpa;
/* Check the 4-bit (yes, only 4) version register */
lasi->version = gsc_readl(lasi->hpa + LASI_VER) & 0xf;
printk(KERN_INFO "%s version %d at 0x%lx found.\n",
lasi->name, lasi->version, lasi->hpa);
/* initialize the chassis LEDs really early */
lasi_led_init(lasi->hpa);
/* Stop LASI barking for a bit */
lasi_init_irq(lasi);
/* the IRQ lasi should use */
irq = gsc_alloc_irq(&gsc_irq);
if (irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
__FUNCTION__);
kfree(lasi);
return -EBUSY;
}
ret = request_irq(gsc_irq.irq, busdev_barked, 0, "lasi", lasi);
if (ret < 0) {
kfree(lasi);
return ret;
}
/* Save this for debugging later */
lasi->parent_irq = gsc_irq.irq;
lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
/* enable IRQs for devices below LASI */
gsc_writel(lasi->eim, lasi->hpa + OFFSET_IAR);
/* Done init'ing, register this driver */
ret = gsc_common_irqsetup(dev, lasi);
if (ret) {
kfree(lasi);
return ret;
}
fixup_child_irqs(dev, lasi->busdev_region->data.irqbase,
lasi_choose_irq);
/* initialize the power off function */
/* FIXME: Record the LASI HPA for the power off function. This should
* ensure that only the first LASI (the one controlling the power off)
* sets the HPA here */
lasi_power_off_hpa = lasi->hpa;
pm_power_off = lasi_power_off;
return ret;
}
static struct parisc_device_id lasi_tbl[] = {
{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00081 },
{ 0, }
};
struct parisc_driver lasi_driver = {
name: "Lasi",
id_table: lasi_tbl,
probe: lasi_init_chip,
};
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h> /* for __init and __devinit */
/* #define PCI_DEBUG enable ASSERT */
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>
@@ -42,15 +43,13 @@
#include <asm/byteorder.h>
#include <asm/irq.h> /* for struct irq_region support */
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/hardware.h> /* for register_driver() stuff */
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <asm/iosapic.h> /* for iosapic_register() */
#include <asm/gsc.h> /* gsc_read/write stuff */
#include <asm/io.h> /* read/write stuff */
#ifndef TRUE
#define TRUE (1 == 1)
@@ -62,6 +61,9 @@
#undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */
#undef DEBUG_LBA_PAT /* debug PCI Resource Mgt code - PDC PAT only */
#undef FBB_SUPPORT /* Fast Back-Back xfers - NOT READY YET */
#ifdef DEBUG_LBA
#define DBG(x...) printk(x)
#else
@@ -102,22 +104,6 @@
#define MODULE_NAME "lba"
static int lba_driver_callback(struct hp_device *, struct pa_iodc_driver *);
static struct pa_iodc_driver lba_drivers_for[]= {
{HPHW_BRIDGE, 0x782, 0x0, 0xa, 0,0,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "tbd", (void *) lba_driver_callback},
{0,0,0,0,0,0,
0,
(char *) NULL, (char *) NULL, (void *) NULL}
};
#define LBA_FUNC_ID 0x0000 /* function id */
#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define LBA_CAPABLE 0x0030 /* capabilities register */
@@ -137,6 +123,9 @@ static struct pa_iodc_driver lba_drivers_for[]= {
#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
#define LBA_STAT_CTL 0x0108 /* Status & Control */
#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
@@ -162,15 +151,18 @@ static struct pa_iodc_driver lba_drivers_for[]= {
#define LBA_DMA_CTL 0x0278 /* firmware sets this */
/* RESET: ignore DMA stuff until we can measure performance */
#define LBA_IBASE 0x0300 /* DMA support */
#define LBA_IBASE 0x0300 /* SBA DMA support */
#define LBA_IMASK 0x0308
/* FIXME: ignore DMA Hint stuff until we can measure performance */
#define LBA_HINT_CFG 0x0310
#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
/* ERROR regs are needed for config cycle kluges */
#define LBA_ERROR_CONFIG 0x0680
#define LBA_SMART_MODE 0x20
#define LBA_ERROR_STATUS 0x0688
#define LBA_ROPE_CTL 0x06A0
#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
@@ -232,19 +224,20 @@ static u32 lba_t32;
* BE WARNED: register writes are posted.
* (ie follow writes which must reach HW with a read)
*/
#define READ_U8(addr) gsc_readb(addr)
#define READ_U16(addr) gsc_readw((u16 *) (addr))
#define READ_U32(addr) gsc_readl((u32 *) (addr))
#define WRITE_U8(value, addr) gsc_writeb(value, addr)
#define WRITE_U16(value, addr) gsc_writew(value, (u16 *) (addr))
#define WRITE_U32(value, addr) gsc_writel(value, (u32 *) (addr))
#define READ_REG8(addr) gsc_readb(addr)
#define READ_REG16(addr) le16_to_cpu(gsc_readw((u16 *) (addr)))
#define READ_REG32(addr) le32_to_cpu(gsc_readl((u32 *) (addr)))
#define WRITE_REG8(value, addr) gsc_writeb(value, addr)
#define WRITE_REG16(value, addr) gsc_writew(cpu_to_le16(value), (u16 *) (addr))
#define WRITE_REG32(value, addr) gsc_writel(cpu_to_le32(value), (u32 *) (addr))
#define READ_U8(addr) __raw_readb(addr)
#define READ_U16(addr) __raw_readw(addr)
#define READ_U32(addr) __raw_readl(addr)
#define WRITE_U8(value, addr) __raw_writeb(value, addr)
#define WRITE_U16(value, addr) __raw_writew(value, addr)
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_REG8(addr) readb(addr)
#define READ_REG16(addr) readw(addr)
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG8(value, addr) writeb(value, addr)
#define WRITE_REG16(value, addr) writew(value, addr)
#define WRITE_REG32(value, addr) writel(value, addr)
#define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
@@ -253,25 +246,12 @@ static u32 lba_t32;
#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)
#ifdef DEBUG_LBA
/* Extract LBA (Rope) number from HPA */
#define LBA_NUM(x) ((((uintptr_t) x) >> 13) & 0xf)
#endif /* DEBUG_LBA */
#ifdef __LP64__
/* PDC_PAT */
static unsigned long pdc_result[32] __attribute__ ((aligned (8))) = {0,0,0,0};
#endif
/*
** One time initialization to let the world know the LBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
** Extract LBA (Rope) number from HPA
** REVISIT: 16 ropes for Stretch/Ike?
*/
void __init lba_init(void)
{
register_driver(lba_drivers_for);
}
#define ROPES_PER_SBA 8
#define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_SBA-1))
static void
@@ -282,9 +262,9 @@ lba_dump_res(struct resource *r, int d)
if (NULL == r)
return;
printk("(%p)", r->parent);
printk(KERN_DEBUG "(%p)", r->parent);
for (i = d; i ; --i) printk(" ");
printk("%p [%lx,%lx]/%x\n", r, r->start, r->end, (int) r->flags);
printk(KERN_DEBUG "%p [%lx,%lx]/%x\n", r, r->start, r->end, (int) r->flags);
lba_dump_res(r->child, d+2);
lba_dump_res(r->sibling, d);
}
@@ -369,7 +349,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
* Set the smart mode bit so that master aborts don't cause \
* LBA to go into PCI fatal mode (required). \
*/ \
WRITE_REG32(error_config | 0x20, d->hba.base_addr + LBA_ERROR_CONFIG); \
WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
}
@@ -414,9 +394,9 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
*
* Actually, there is still a race in which
* we could be clearing a fatal error. We will
* live with this during our real mode bus walk
* live with this during our initial bus walk
* until rev 4.0 (no driver activity during
* real mode bus walk). The real mode bus walk
* initial bus walk). The initial bus walk
* has race conditions concerning the use of
* smart mode as well.
*/
@@ -430,7 +410,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
* Set clear enable (CE) bit. Unset by HW when new \
* errors are logged -- LBA HW ERS section 14.3.3). \
*/ \
WRITE_REG32(status_control | 0x20, base + LBA_STAT_CTL); \
WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
error_status = READ_REG32(base + LBA_ERROR_STATUS); \
if ((error_status & 0x1f) != 0) { \
/* \
@@ -442,7 +422,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
* Clear error status (if fatal bit not set) by setting \
* clear error log bit (CL). \
*/ \
WRITE_REG32(status_control | 0x10, base + LBA_STAT_CTL); \
WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
} \
} \
}
@@ -483,7 +463,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
static unsigned int
lba_rd_cfg( struct lba_device *d, u32 tok, u8 reg, u32 size)
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
u32 data = ~0;
int error = 0;
@@ -525,44 +505,45 @@ lba_rd_cfg( struct lba_device *d, u32 tok, u8 reg, u32 size)
}
static int lba_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
struct lba_device *d = LBA_DEV(bus->sysdata);
u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
u32 tok = LBA_CFG_TOK(local_bus, devfn);
/* FIXME: B2K/C3600 workaround: always use the old method for now... */
/* if (!LBA_TR4PLUS(d) && !LBA_SKIP_PROBE(d)) */ {
/* original - Generate config cycle on broken elroy
   at the risk of missing PCI bus errors. */
*data = lba_rd_cfg(d, tok, pos, size);
DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data);
return(*data == ~0U);	/* ~0UL can never match a u32 */
}
#define LBA_CFG_RD(size, mask) \
static int lba_cfg_read##size (struct pci_dev *dev, int pos, u##size *data) \
{ \
struct lba_device *d = LBA_DEV(dev->bus->sysdata); \
u32 local_bus = (dev->bus->parent == NULL) ? 0 : dev->bus->secondary; \
u32 tok = LBA_CFG_TOK(local_bus,dev->devfn); \
\
if ((!LBA_TR4PLUS(d)) && (!LBA_SKIP_PROBE(d))) { \
/* original - Generate config cycle on broken elroy \
   at the risk of missing PCI bus errors. */ \
*data = (u##size) lba_rd_cfg(d, tok, pos, sizeof(u##size)); \
DBG_CFG(KERN_DEBUG "%s(%s+%2x) -> 0x%x (a)\n", __FUNCTION__, dev->slot_name, pos, *data); \
return(*data == (u##size) -1); \
} \
\
if (LBA_SKIP_PROBE(d) && (!lba_device_present(dev->bus->secondary, dev->devfn, d))) \
{ \
DBG_CFG(KERN_DEBUG "%s(%s+%2x) -> -1 (b)\n", __FUNCTION__, dev->slot_name, pos, *data); \
/* either don't want to look or know device isn't present. */ \
*data = (u##size) -1; \
return(0); \
} \
\
/* Basic Algorithm \
** Should only get here on fully working LBA rev. \
** This is how simple the code should have been. \
*/ \
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos); \
*data = READ_REG##size(d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & mask));\
DBG_CFG(KERN_DEBUG "%s(%s+%2x) -> 0x%x (c)\n", __FUNCTION__, dev->slot_name, pos, *data);\
return(*data == (u##size) -1); \
}
LBA_CFG_RD( 8, 3)
LBA_CFG_RD(16, 2)
LBA_CFG_RD(32, 0)
if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d)))
{
DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos);
/* either don't want to look or know device isn't present. */
*data = ~0U;
return(0);
}
/* Basic Algorithm
** Should only get here on fully working LBA rev.
** This is how simple the code should have been.
*/
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
switch(size) {
case 1: *(u8 *) data = READ_REG8(d->hba.base_addr + LBA_PCI_CFG_DATA);
break;
case 2: *(u16 *) data = READ_REG16(d->hba.base_addr + LBA_PCI_CFG_DATA);
break;
case 4: *(u32 *) data = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_DATA);
break;
}
DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
return(*data == ~0U);
}
static void
@@ -608,53 +589,53 @@ lba_wr_cfg( struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
* by doing a read of CONFIG ADDR after the write.
*/
#define LBA_CFG_WR(size, mask) \
static int lba_cfg_write##size (struct pci_dev *dev, int pos, u##size data) \
{ \
struct lba_device *d = LBA_DEV(dev->bus->sysdata); \
u32 local_bus = (dev->bus->parent == NULL) ? 0 : dev->bus->secondary; \
u32 tok = LBA_CFG_TOK(local_bus,dev->devfn); \
\
ASSERT((tok & 0xff) == 0); \
ASSERT(pos < 0x100); \
\
if ((!LBA_TR4PLUS(d)) && (!LBA_SKIP_PROBE(d))) { \
/* Original Workaround */ \
lba_wr_cfg(d, tok, pos, (u32) data, sizeof(u##size)); \
DBG_CFG(KERN_DEBUG "%s(%s+%2x) = 0x%x (a)\n", __FUNCTION__, dev->slot_name, pos, data); \
return 0; \
} \
\
if (LBA_SKIP_PROBE(d) && (!lba_device_present(dev->bus->secondary, dev->devfn, d))) { \
DBG_CFG(KERN_DEBUG "%s(%s+%2x) = 0x%x (b)\n", __FUNCTION__, dev->slot_name, pos, data); \
return 1; /* New Workaround */ \
} \
\
DBG_CFG(KERN_DEBUG "%s(%s+%2x) = 0x%x (c)\n", __FUNCTION__, dev->slot_name, pos, data); \
/* Basic Algorithm */ \
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos); \
WRITE_REG##size(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & mask)); \
lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR); \
return 0; \
}
static int lba_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
struct lba_device *d = LBA_DEV(bus->sysdata);
u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
u32 tok = LBA_CFG_TOK(local_bus,devfn);
ASSERT((tok & 0xff) == 0);
ASSERT(pos < 0x100);
LBA_CFG_WR( 8, 3)
LBA_CFG_WR(16, 2)
LBA_CFG_WR(32, 0)
if (!LBA_TR4PLUS(d) && !LBA_SKIP_PROBE(d)) {
/* Original Workaround */
lba_wr_cfg(d, tok, pos, (u32) data, size);
DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
return 0;
}
static struct pci_ops lba_cfg_ops = {
lba_cfg_read8, lba_cfg_read16, lba_cfg_read32,
lba_cfg_write8, lba_cfg_write16, lba_cfg_write32
if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data);
return 1; /* New Workaround */
}
DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
/* Basic Algorithm */
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
switch(size) {
case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA);
break;
case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
break;
case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
break;
}
lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
return 0;
}
};
static struct pci_ops lba_cfg_ops = {
read: lba_cfg_read,
write: lba_cfg_write,
};
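/*
** Sketch of how these ops get exercised (illustrative, per the generic
** 2.5-era PCI accessor interface, not code from this driver): a call to
** pci_read_config_word(dev, PCI_VENDOR_ID, &w) ends up doing roughly
**
**	u32 tmp;
**	dev->bus->ops->read(dev->bus, dev->devfn, PCI_VENDOR_ID, 2, &tmp);
**	w = (u16) tmp;
**
** i.e. lba_cfg_read() with size == 2.
*/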
static void
lba_bios_init(void)
{
DBG(KERN_DEBUG MODULE_NAME ": lba_bios_init\n");
DBG(MODULE_NAME ": lba_bios_init\n");
}
@@ -675,7 +656,7 @@ lba_claim_dev_resources(struct pci_dev *dev)
u16 cmd;
int i, srch_flags;
(void) lba_cfg_read16(dev, PCI_COMMAND, &cmd);
(void) pci_read_config_word(dev, PCI_COMMAND, &cmd);
srch_flags = (cmd & PCI_COMMAND_IO) ? IORESOURCE_IO : 0;
if (cmd & PCI_COMMAND_MEMORY)
@@ -712,13 +693,13 @@ static void
lba_fixup_bus(struct pci_bus *bus)
{
struct list_head *ln;
struct pci_dev *dev;
#ifdef FBB_SUPPORT
u16 fbb_enable = PCI_STATUS_FAST_BACK;
u16 status;
struct lba_device *ldev = LBA_DEV(bus->sysdata);
#ifdef __LP64__
int i;
#endif
struct lba_device *ldev = LBA_DEV(bus->sysdata);
int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num);
DBG("lba_fixup_bus(0x%p) bus %d sysdata 0x%p\n",
bus, bus->secondary, bus->sysdata);
@@ -731,63 +712,119 @@ lba_fixup_bus(struct pci_bus *bus)
DBG("lba_fixup_bus() %s [%lx/%lx]/%x\n",
ldev->hba.io_space.name,
ldev->hba.io_space.start,
ldev->hba.io_space.end,
ldev->hba.io_space.start, ldev->hba.io_space.end,
(int) ldev->hba.io_space.flags);
DBG("lba_fixup_bus() %s [%lx/%lx]/%x\n",
ldev->hba.mem_space.name,
ldev->hba.mem_space.start,
ldev->hba.mem_space.end,
(int) ldev->hba.mem_space.flags);
ldev->hba.lmmio_space.name,
ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
(int) ldev->hba.lmmio_space.flags);
err = request_resource(&ioport_resource, &(ldev->hba.io_space));
if (err < 0) {
BUG();
lba_dump_res(&ioport_resource, 2);
}
err = request_resource(&iomem_resource, &(ldev->hba.mem_space));
err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
if (err < 0) {
BUG();
lba_dump_res(&iomem_resource, 2);
}
bus->resource[0] = &(ldev->hba.io_space);
bus->resource[1] = &(ldev->hba.mem_space);
bus->resource[1] = &(ldev->hba.lmmio_space);
} else {
/* KLUGE ALERT!
** PCI-PCI Bridge resource munging.
** This hack should go away in the near future.
** It's based on the Alpha port.
*/
int i;
u16 cmd;
for (i = 0; i < 4; i++) {
bus->resource[i] =
&bus->self->resource[PCI_BRIDGE_RESOURCES+i];
bus->resource[i]->name = bus->name;
}
#if 0
bus->resource[0]->flags |= pci_bridge_check_io(bus->self);
#else
bus->resource[0]->flags |= IORESOURCE_IO;
#endif
bus->resource[1]->flags |= IORESOURCE_MEM;
bus->resource[2]->flags = 0; /* Don't support prefetchable */
bus->resource[3]->flags = 0; /* not used */
/*
** If the PPB is enabled (ie already configured) then
** just read those values.
*/
(void) pci_read_config_word(bus->self, PCI_COMMAND, &cmd);
if (cmd & (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)) {
pci_read_bridge_bases(bus);
} else {
/* Not configured.
** For now, propagate HBA limits to the bus;
** PCI will adjust them later.
*/
bus->resource[0]->end = ldev->hba.io_space.end;
bus->resource[1]->end = ldev->hba.lmmio_space.end;
}
/* Turn off downstream PF memory address range by default */
bus->resource[2]->start = 1024*1024;
bus->resource[2]->end = bus->resource[2]->start - 1;
}
list_for_each(ln, &bus->devices) {
int i;
struct pci_dev *dev = pci_dev_b(ln);
dev = pci_dev_b(ln);
DBG("lba_fixup_bus() %s\n", dev->name);
#ifdef __LP64__
/* Virtualize Device/Bridge Resources. */
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
/* If resource not allocated - skip it */
if (!res->start)
continue;
if (res->flags & IORESOURCE_IO) {
DBG("lba_fixup_bus() I/O Ports [%lx/%lx] -> ",
res->start, res->end);
res->start |= lba_portbase;
res->end |= lba_portbase;
DBG("[%lx/%lx]\n", res->start, res->end);
} else if (res->flags & IORESOURCE_MEM) {
/*
** 0-5 are the "standard PCI regions"
** (see comments near PCI_NUM_RESOURCES in include/linux/pci.h)
** Convert PCI (IO_VIEW) addresses to
** processor (PA_VIEW) addresses
*/
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *res = &(dev->resource[i]);
if (res->flags & IORESOURCE_MEM) {
/* "Globalize" PCI address */
res->start |= ldev->lmmio_base;
res->end |= ldev->lmmio_base;
DBG("lba_fixup_bus() MMIO [%lx/%lx] -> ",
res->start, res->end);
res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start);
res->end = PCI_HOST_ADDR(HBA_DATA(ldev), res->end);
DBG("[%lx/%lx]\n", res->start, res->end);
}
}
#endif
#ifdef FBB_SUPPORT
/*
** If one device does not support FBB transfers,
** No one on the bus can be allowed to use them.
*/
(void) lba_cfg_read16(dev, PCI_STATUS, &status);
(void) pci_read_config_word(dev, PCI_STATUS, &status);
fbb_enable &= status;
#endif
#ifdef __LP64__
if (pdc_pat) {
if (is_pdc_pat()) {
/* Claim resources for PDC's devices */
lba_claim_dev_resources(dev);
}
#endif /* __LP64__ */
#endif
/*
** P2PBs have no IRQs; ignore them.
@@ -796,20 +833,20 @@ lba_fixup_bus(struct pci_bus *bus)
continue;
/* Adjust INTERRUPT_LINE for this dev */
iosapic_fixup_irq(LBA_DEV(bus->sysdata)->iosapic_obj, dev);
iosapic_fixup_irq(ldev->iosapic_obj, dev);
}
#if 0
#ifdef FBB_SUPPORT
/* FIXME/REVISIT - finish figuring out how to set FBB on both
** pbus_set_ranges() clobbers PCI_BRIDGE_CONTROL.
** pci_setup_bridge() clobbers PCI_BRIDGE_CONTROL.
** Can't fixup here anyway....garr...
*/
if (fbb_enable) {
if (bus->self) {
u8 control;
/* enable on PPB */
(void) lba_cfg_read8(bus->self, PCI_BRIDGE_CONTROL, &control);
(void) lba_cfg_write8(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);
(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);
} else {
/* enable on LBA */
@@ -819,17 +856,17 @@ lba_fixup_bus(struct pci_bus *bus)
/* Lastly enable FBB/PERR/SERR on all devices too */
list_for_each(ln, &bus->devices) {
(void) lba_cfg_read16(dev, PCI_COMMAND, &status);
(void) pci_read_config_word(dev, PCI_COMMAND, &status);
status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
(void) lba_cfg_write16(dev, PCI_COMMAND, status);
(void) pci_write_config_word(dev, PCI_COMMAND, status);
}
#endif
}
struct pci_bios_ops lba_bios_ops = {
lba_bios_init,
lba_fixup_bus /* void lba_fixup_bus(struct pci_bus *bus) */
init: lba_bios_init,
fixup_bus: lba_fixup_bus,
};
@@ -853,8 +890,6 @@ struct pci_bios_ops lba_bios_ops = {
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
u##size t; \
ASSERT(bus != NULL); \
DBG_PORT(KERN_DEBUG "%s(0x%p, 0x%x) ->", __FUNCTION__, bus, addr); \
t = READ_REG##size(LBA_ASTRO_PORT_BASE + addr); \
DBG_PORT(" 0x%x\n", t); \
return (t); \
@@ -895,8 +930,8 @@ LBA_PORT_IN(32, 0)
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
ASSERT(bus != NULL); \
DBG_PORT(KERN_DEBUG "%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
ASSERT(d != NULL); \
DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
WRITE_REG##size(val, LBA_ASTRO_PORT_BASE + addr); \
if (LBA_DEV(d)->hw_rev < 3) \
lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
@@ -908,13 +943,16 @@ LBA_PORT_OUT(32, 0)
static struct pci_port_ops lba_astro_port_ops = {
lba_astro_in8, lba_astro_in16, lba_astro_in32,
lba_astro_out8, lba_astro_out16, lba_astro_out32
inb: lba_astro_in8,
inw: lba_astro_in16,
inl: lba_astro_in32,
outb: lba_astro_out8,
outw: lba_astro_out16,
outl: lba_astro_out32
};
#ifdef __LP64__
#define PIOP_TO_GMMIO(lba, addr) \
((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
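/*
** Worked example (illustrative): the PIOP scatters each aligned port
** word onto its own 4k page of GMMIO space. For port 0x3f8:
**
**	(0x3f8 & 0xFFFC) << 10	-- 0xFE000 (page-granular offset)
**	(0x3f8 & 3)		-- 0       (byte lane within the word)
**	=> iop_base + 0xFE000
*/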
@@ -936,7 +974,7 @@ static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
u##size t; \
ASSERT(bus != NULL); \
DBG_PORT(KERN_DEBUG "%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
DBG_PORT(" 0x%x\n", t); \
return (t); \
@@ -953,7 +991,7 @@ static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
void *where = (void *) PIOP_TO_GMMIO(LBA_DEV(l), addr); \
ASSERT(bus != NULL); \
DBG_PORT(KERN_DEBUG "%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
WRITE_REG##size(val, where); \
/* flush the I/O down to the elroy at least */ \
lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
@@ -965,8 +1003,12 @@ LBA_PORT_OUT(32, 0)
static struct pci_port_ops lba_pat_port_ops = {
lba_pat_in8, lba_pat_in16, lba_pat_in32,
lba_pat_out8, lba_pat_out16, lba_pat_out32
inb: lba_pat_in8,
inw: lba_pat_in16,
inl: lba_pat_in32,
outb: lba_pat_out8,
outw: lba_pat_out16,
outl: lba_pat_out32
};
@@ -978,30 +1020,27 @@ static struct pci_port_ops lba_pat_port_ops = {
** We don't have a struct pci_bus assigned to us yet.
*/
static void
lba_pat_resources( struct hp_device *d, struct lba_device *lba_dev)
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
unsigned long bytecnt;
pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */
#ifdef DONT_NEED_THIS_FOR_ASTRO
pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */
long io_count;
#endif
long status; /* PDC return status */
long pa_count;
int i;
/* return cell module (IO view) */
status = pdc_pat_cell_module(& pdc_result, d->pcell_loc, d->mod_index,
status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
PA_VIEW, & pa_pdc_cell);
pa_count = pa_pdc_cell.mod[1];
#ifdef DONT_NEED_THIS_FOR_ASTRO
status |= pdc_pat_cell_module(& pdc_result, d->pcell_loc, d->mod_index,
IO_VIEW, & io_pdc_cell);
status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
IO_VIEW, &io_pdc_cell);
io_count = io_pdc_cell.mod[1];
#endif
/* We've already done this once for device discovery...*/
if (status != PDC_RET_OK) {
if (status != PDC_OK) {
panic("pdc_pat_cell_module() call failed for LBA!\n");
}
@@ -1017,10 +1056,11 @@ lba_pat_resources( struct hp_device *d, struct lba_device *lba_dev)
unsigned long type;
unsigned long start;
unsigned long end; /* aka finish */
} *p;
} *p, *io;
struct resource *r;
p = (void *) &(pa_pdc_cell.mod[2+i*3]);
io = (void *) &(io_pdc_cell.mod[2+i*3]);
/* Convert the PAT range data to PCI "struct resource" */
switch(p->type & 0xff) {
@@ -1030,9 +1070,9 @@ lba_pat_resources( struct hp_device *d, struct lba_device *lba_dev)
break;
case PAT_LMMIO:
/* used to fix up pre-initialized MEM BARs */
lba_dev->lmmio_base = p->start;
lba_dev->hba.lmmio_space_offset = p->start - io->start;
r = &(lba_dev->hba.mem_space);
r = &(lba_dev->hba.lmmio_space);
r->name = "LBA LMMIO";
r->start = p->start;
r->end = p->end;
@@ -1060,8 +1100,8 @@ lba_pat_resources( struct hp_device *d, struct lba_device *lba_dev)
r = &(lba_dev->hba.io_space);
r->name = "LBA I/O Port";
r->start = lba_dev->hba.hba_num << 16;
r->end = r->start + 0xffffUL;
r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
r->end = r->start + HBA_PORT_SPACE_SIZE - 1;
r->flags = IORESOURCE_IO;
r->parent = r->sibling = r->child = NULL;
break;
@@ -1077,17 +1117,22 @@ lba_pat_resources( struct hp_device *d, struct lba_device *lba_dev)
static void
lba_legacy_resources( struct hp_device *d, struct lba_device *lba_dev)
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
int lba_num;
struct resource *r;
unsigned long rsize;
int lba_num;
#ifdef __LP64__
/*
** Used to sign extend since BAR values are only 32-bit.
** 64-bit BARs have the upper 32 bits zero'd by firmware.
** "Sprockets" PDC initializes for 32-bit OS.
** Sign extend all BAR values on "legacy" platforms.
** "Sprockets" PDC (Forte/Allegro) initializes everything
** for "legacy" 32-bit OS (HPUX 10.20).
** Upper 32-bits of 64-bit BAR will be zero too.
*/
lba_dev->lmmio_base = 0xffffffff00000000UL;
lba_dev->hba.lmmio_space_offset = 0xffffffff00000000UL;
#else
lba_dev->hba.lmmio_space_offset = 0UL;
#endif
/*
@@ -1097,7 +1142,7 @@ lba_legacy_resources( struct hp_device *d, struct lba_device *lba_dev)
** PCI bus walk *should* end up with the same result.
** FIXME: But we don't have sanity checks in PCI or LBA.
*/
lba_num = READ_REG32(d->hpa + LBA_FW_SCRATCH);
lba_num = READ_REG32(pa_dev->hpa + LBA_FW_SCRATCH);
r = &(lba_dev->hba.bus_num);
r->name = "LBA PCI Busses";
r->start = lba_num & 0xff;
@@ -1106,19 +1151,67 @@ lba_legacy_resources( struct hp_device *d, struct lba_device *lba_dev)
/* Set up local PCI Bus resources - we don't really need
** them for Legacy boxes but it's nice to see in /proc.
*/
r = &(lba_dev->hba.mem_space);
r = &(lba_dev->hba.lmmio_space);
r->name = "LBA PCI LMMIO";
r->flags = IORESOURCE_MEM;
r->start = READ_REG32(d->hpa + LBA_LMMIO_BASE);
r->end = r->start + ~ (READ_REG32(d->hpa + LBA_LMMIO_MASK));
/* Ignore "Range Enable" bit in the BASE register */
r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev),
((long) READ_REG32(pa_dev->hpa + LBA_LMMIO_BASE)) & ~1UL);
rsize = ~READ_REG32(pa_dev->hpa + LBA_LMMIO_MASK) + 1;
/*
** Each rope only gets part of the distributed range.
** Adjust "window" for this rope
*/
rsize /= ROPES_PER_SBA;
r->start += rsize * LBA_NUM(pa_dev->hpa);
r->end = r->start + rsize - 1 ;
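/*
** Worked example with made-up register values: if LBA_LMMIO_BASE reads
** 0xf8000001 (bit 0 is the range enable) and LBA_LMMIO_MASK reads
** 0xfe000000, the distributed range is 32MB (rsize = 0x02000000).
** Split across ROPES_PER_SBA (8) ropes that's a 4MB window per rope;
** rope 2 would claim 0xf8800000..0xf8bfffff (before PCI_HOST_ADDR
** translation).
*/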
/*
** XXX FIXME - ignore LBA_ELMMIO_BASE for now
** "Directed" ranges are used when the "distributed range" isn't
** sufficient for all devices below a given LBA. Typically devices
** like graphics cards or X25 may need a directed range when the
** bus has multiple slots (ie multiple devices) or the device
** needs more than the typical 4 or 8MB a distributed range offers.
**
** The main reason for ignoring it for now is the frigging complications.
** Directed ranges may overlap (and take precedence over)
** distributed ranges. Ie a distributed range assigned to an unused
** rope may be used by a directed range on a different rope.
** Support for graphics devices may require fixing this
** since they may be assigned a directed range which overlaps
** an existing (but unused portion of) distributed range.
*/
r = &(lba_dev->hba.elmmio_space);
r->name = "extra LBA PCI LMMIO";
r->flags = IORESOURCE_MEM;
r->start = READ_REG32(pa_dev->hpa + LBA_ELMMIO_BASE);
r->end = 0;
/* check Range Enable bit */
if (r->start & 1) {
/* First baby step to getting Direct Ranges listed in /proc.
** AFAIK, only Sprockets PDC will setup a directed Range.
*/
r->start &= ~1;
r->end = r->start;
r->end += ~READ_REG32(pa_dev->hpa + LBA_ELMMIO_MASK);
printk(KERN_DEBUG "WARNING: Ignoring enabled ELMMIO BASE 0x%0lx SIZE 0x%lx\n",
r->start,
r->end + 1);
}
r = &(lba_dev->hba.io_space);
r->name = "LBA PCI I/O Ports";
r->flags = IORESOURCE_IO;
r->start = READ_REG32(d->hpa + LBA_IOS_BASE);
r->end = r->start + (READ_REG32(d->hpa + LBA_IOS_MASK) ^ 0xffff);
r->start = READ_REG32(pa_dev->hpa + LBA_IOS_BASE) & ~1L;
r->end = r->start + (READ_REG32(pa_dev->hpa + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));
lba_num = lba_dev->hba.hba_num << 16;
/* Virtualize the I/O Port space ranges */
lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
r->start |= lba_num;
r->end |= lba_num;
}
@@ -1136,29 +1229,92 @@ lba_legacy_resources( struct hp_device *d, struct lba_device *lba_dev)
**
**************************************************************************/
static void
static int __init
lba_hw_init(struct lba_device *d)
{
u32 stat;
u32 bus_reset; /* PDC_PAT_BUG */
#if 0
printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n",
d->hba.base_addr,
READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n",
READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
printk(KERN_DEBUG " HINT reg ");
{ int i;
for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
printk(" %Lx", READ_REG64(d->hba.base_addr + i));
}
printk("\n");
#endif /* DEBUG_LBA_PAT */
#ifdef __LP64__
#warning FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
#endif
/* PDC_PAT_BUG: exhibited in rev 40.48 on L2000 */
bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
if (bus_reset) {
printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
}
stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
if (stat & LBA_SMART_MODE) {
printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
stat &= ~LBA_SMART_MODE;
WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
}
/* Set HF mode as the default (vs. -1 mode). */
stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
/*
** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
** if it's not already set. If we just cleared the PCI Bus Reset
** signal, wait a bit for the PCI devices to recover and setup.
*/
if (bus_reset)
mdelay(pci_post_reset_delay);
if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
/*
** PDC_PAT_BUG: PDC rev 40.48 on L2000.
** B2000/C3600/J6000 also have this problem?
**
** Elroys with hot pluggable slots don't get configured
** correctly if the slot is empty.  ARB_MASK is set to 0
** and we can't master transactions on the bus unless at
** least one bit is set.  0x3 enables the elroy and first slot.
*/
printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
}
/*
** FIXME: Hint registers are programmed with default hint
** values by firmware. Hints should be sane even if we
** can't reprogram them the way drivers want.
*/
return 0;
}
static void
static void __init
lba_common_init(struct lba_device *lba_dev)
{
pci_bios = &lba_bios_ops;
pcibios_register_hba((struct pci_hba_data *)lba_dev);
pcibios_register_hba(HBA_DATA(lba_dev));
lba_dev->lba_lock = SPIN_LOCK_UNLOCKED;
/*
@@ -1176,32 +1332,31 @@ lba_common_init(struct lba_device *lba_dev)
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static __init int
lba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
static int __init
lba_driver_callback(struct parisc_device *dev)
{
struct lba_device *lba_dev;
struct pci_bus *lba_bus;
u32 func_class;
void *tmp_obj;
/* from drivers/pci/setup-bus.c */
extern void __init pbus_set_ranges(struct pci_bus *, struct pbus_set_ranges_data *);
char *version;
/* Read HW Rev First */
func_class = READ_REG32(d->hpa + LBA_FCLASS);
func_class = READ_REG32(dev->hpa + LBA_FCLASS);
func_class &= 0xf;
switch (func_class) {
case 0: dri->version = "TR1.0"; break;
case 1: dri->version = "TR2.0"; break;
case 2: dri->version = "TR2.1"; break;
case 3: dri->version = "TR2.2"; break;
case 4: dri->version = "TR3.0"; break;
case 5: dri->version = "TR4.0"; break;
default: dri->version = "TR4+";
case 0: version = "TR1.0"; break;
case 1: version = "TR2.0"; break;
case 2: version = "TR2.1"; break;
case 3: version = "TR2.2"; break;
case 4: version = "TR3.0"; break;
case 5: version = "TR4.0"; break;
default: version = "TR4+";
}
printk("%s version %s (0x%x) found at 0x%p\n", dri->name, dri->version, func_class & 0xf, d->hpa);
printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
MODULE_NAME, version, func_class & 0xf, dev->hpa);
/* Just in case we find some prototypes... */
if (func_class < 2) {
@@ -1212,22 +1367,16 @@ lba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
/*
** Tell I/O SAPIC driver we have a IRQ handler/region.
*/
tmp_obj = iosapic_register(d->hpa+LBA_IOSAPIC_BASE);
if (NULL == tmp_obj) {
/* iosapic may have failed. But more likely the
** slot isn't occupied and thus has no IRT entries.
** iosapic_register looks for this iosapic in the IRT
** before bothering to allocating data structures
** we don't need.
tmp_obj = iosapic_register(dev->hpa + LBA_IOSAPIC_BASE);
/* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't
** have an IRT entry will get NULL back from iosapic code.
*/
DBG(KERN_WARNING MODULE_NAME ": iosapic_register says not used\n");
return (1);
}
lba_dev = kmalloc(sizeof(struct lba_device), GFP_KERNEL);
if (NULL == lba_dev)
{
printk("lba_init_chip - couldn't alloc lba_device\n");
printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
return(1);
}
@@ -1242,48 +1391,45 @@ lba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
*/
lba_dev->hw_rev = func_class;
lba_dev->hba.base_addr = d->hpa; /* faster access */
lba_dev->hba.base_addr = dev->hpa; /* faster access */
lba_dev->hba.dev = dev;
lba_dev->iosapic_obj = tmp_obj; /* save interrupt handle */
lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */
/* ------------ Second : initialize common stuff ---------- */
lba_common_init(lba_dev);
lba_hw_init(lba_dev);
if (lba_hw_init(lba_dev))
return(1);
/* ---------- Third : setup I/O Port and MMIO resources --------- */
#ifdef __LP64__
if (pdc_pat) {
#ifdef __LP64__
if (is_pdc_pat()) {
/* PDC PAT firmware uses PIOP region of GMMIO space. */
pci_port = &lba_pat_port_ops;
/* Go ask PDC PAT what resources this LBA has */
lba_pat_resources(d, lba_dev);
} else {
lba_pat_resources(dev, lba_dev);
} else
#endif
{
/* Sprockets PDC uses NPIOP region */
pci_port = &lba_astro_port_ops;
/* Poke the chip a bit for /proc output */
lba_legacy_resources(d, lba_dev);
#ifdef __LP64__
lba_legacy_resources(dev, lba_dev);
}
#endif
/*
** Tell PCI support another PCI bus was found.
** Walks PCI bus for us too.
*/
lba_bus = lba_dev->hba.hba_bus =
pci_scan_bus( lba_dev->hba.bus_num.start, &lba_cfg_ops, (void *) lba_dev);
pci_scan_bus(lba_dev->hba.bus_num.start, &lba_cfg_ops, (void *) lba_dev);
#ifdef __LP64__
if (pdc_pat) {
/* determine window sizes needed by PCI-PCI bridges */
DBG_PAT("LBA pcibios_size_bridge()\n");
pcibios_size_bridge(lba_bus, NULL);
if (is_pdc_pat()) {
/* assign resources to un-initialized devices */
DBG_PAT("LBA pcibios_assign_unassigned_resources()\n");
pcibios_assign_unassigned_resources(lba_bus);
@@ -1292,14 +1438,10 @@ lba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
DBG_PAT("\nLBA PIOP resource tree\n");
lba_dump_res(&lba_dev->hba.io_space, 2);
DBG_PAT("\nLBA LMMIO resource tree\n");
lba_dump_res(&lba_dev->hba.mem_space, 2);
lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
/* program *all* PCI-PCI bridge range registers */
DBG_PAT("LBA pbus_set_ranges()\n");
pbus_set_ranges(lba_bus, NULL);
}
#endif /* __LP64__ */
#endif
/*
** Once PCI register ops has walked the bus, access to config
@@ -1314,33 +1456,43 @@ lba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
return 0;
}
static struct parisc_device_id lba_tbl[] = {
{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x782, 0xa },
{ 0, }
};
static struct parisc_driver lba_driver = {
name: MODULE_NAME,
id_table: lba_tbl,
probe: lba_driver_callback
};
/*
** One time initialization to let the world know the LBA was found.
** Must be called exactly once before pci_init().
*/
void __init lba_init(void)
{
register_parisc_driver(&lba_driver);
}
/*
** Initialize the IBASE/IMASK registers for LBA (Elroy).
** Only called from sba_iommu.c initialization sequence.
** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA).
** sba_iommu is responsible for locking (none needed at init time).
*/
void lba_init_iregs(void *sba_hpa, u32 ibase, u32 imask)
void
lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
extern struct pci_hba_data *hba_list; /* arch/parisc/kernel/pci.c */
struct pci_hba_data *lba;
unsigned long base_addr = lba->hpa;
imask <<= 2; /* adjust for hints - 2 more bits */
ASSERT((ibase & 0x003fffff) == 0);
ASSERT((imask & 0x003fffff) == 0);
/* FIXME: sba_hpa is intended to search some table to
** determine which LBA's belong to the caller's SBA.
** IS_ASTRO: just assume only one SBA for now.
*/
ASSERT(NULL != hba_list);
DBG(KERN_DEBUG "%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
for (lba = hba_list; NULL != lba; lba = lba->next) {
DBG(KERN_DEBUG "%s() base_addr %p\n", __FUNCTION__, lba->base_addr);
WRITE_REG32( imask, lba->base_addr + LBA_IMASK);
WRITE_REG32( ibase, lba->base_addr + LBA_IBASE);
}
DBG(KERN_DEBUG "%s() done\n", __FUNCTION__);
DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
WRITE_REG32( imask, base_addr + LBA_IMASK);
WRITE_REG32( ibase, base_addr + LBA_IBASE);
}
@@ -3,34 +3,63 @@
*
* (c) Copyright 2000 Red Hat Software
* (c) Copyright 2000 Helge Deller <hdeller@redhat.com>
* (c) Copyright 2001-2002 Helge Deller <deller@gmx.de>
* (c) Copyright 2001 Randolph Chung <tausq@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* TODO:
* - speed-up calculations with inlined assembler
* - interface to write to second row of LCD from /proc
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/stddef.h> /* for offsetof() */
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <asm/io.h>
#include <asm/gsc.h>
#include <asm/processor.h>
#include <asm/hardware.h>
#include <asm/param.h> /* HZ */
#include <asm/led.h>
#include <asm/pdc.h>
#include <asm/uaccess.h>
/* The control of the LEDs and LCDs on PARISC machines has to be done
   completely in software. The necessary calculations are done in a tasklet
   which is scheduled at every timer interrupt, and since the calculations
   can consume a fair amount of CPU time, some of them can be
   turned off with the following variables (controlled via procfs) */
static int led_type = -1;
static int led_heartbeat = 1;
static int led_diskio = 1;
static int led_lanrxtx = 1;
static char lcd_text[32];
#if 0
#define DPRINTK(x) printk x
#else
#define DPRINTK(x)
#endif
/* define to disable all LED functions */
#undef DISABLE_LEDS
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
#define CALC_ADD(val, comp, add) \
(val<=(comp/8) ? add/16 : val<=(comp/4) ? add/8 : val<=(comp/2) ? add/4 : add)
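/*
** CALC_ADD scales "add" down when "val" is small relative to "comp",
** so light traffic gives a short blink and heavy traffic a long one.
** Worked example with comp = 100 and add = 16:
**
**	val = 10  (<= comp/8)	-- add/16 = 1
**	val = 20  (<= comp/4)	-- add/8  = 2
**	val = 40  (<= comp/2)	-- add/4  = 4
**	val = 80		-- add    = 16
*/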
struct lcd_block {
@@ -40,8 +69,10 @@ struct lcd_block {
};
/* Structure returned by PDC_RETURN_CHASSIS_INFO */
/* NOTE: we use unsigned long:16 two times, since the following member
lcd_cmd_reg_addr needs to be 64bit aligned on 64bit PA2.0-machines */
struct pdc_chassis_lcd_info_ret_block {
unsigned long model:16; /* DISPLAY_MODEL_XXXX (see below) */
unsigned long model:16; /* DISPLAY_MODEL_XXXX */
unsigned long lcd_width:16; /* width of the LCD in chars (DISPLAY_MODEL_LCD only) */
char *lcd_cmd_reg_addr; /* ptr to LCD cmd-register & data ptr for LED */
char *lcd_data_reg_addr; /* ptr to LCD data-register (LCD only) */
@@ -56,33 +87,23 @@ struct pdc_chassis_lcd_info_ret_block {
char _pad;
};
/* values for pdc_chassis_lcd_info_ret_block.model: */
#define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */
#define DISPLAY_MODEL_NONE 1 /* no LED or LCD */
#define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */
#define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */
/* LCD_CMD and LCD_DATA for KittyHawk machines */
#ifdef __LP64__
#define KITTYHAWK_LCD_CMD 0xfffffffff0190000L
#else
#define KITTYHAWK_LCD_CMD 0xf0190000
#endif
#define KITTYHAWK_LCD_DATA (KITTYHAWK_LCD_CMD + 1)
#define KITTYHAWK_LCD_CMD (0xfffffffff0190000UL) /* 64bit-ready */
#define KITTYHAWK_LCD_DATA (KITTYHAWK_LCD_CMD+1)
/* lcd_info is pre-initialized to the values needed to program KittyHawk LCDs */
/* lcd_info is pre-initialized to the values needed to program KittyHawk LCDs.
 * HP seems to have used Sharp/Hitachi HD44780 LCDs most of the time. */
static struct pdc_chassis_lcd_info_ret_block
lcd_info __attribute__((aligned(8))) =
{
model:DISPLAY_MODEL_LCD,
lcd_width:16,
lcd_cmd_reg_addr:(char *) KITTYHAWK_LCD_CMD,
model: DISPLAY_MODEL_LCD,
lcd_width: 16,
lcd_cmd_reg_addr: (char *) KITTYHAWK_LCD_CMD,
lcd_data_reg_addr:(char *) KITTYHAWK_LCD_DATA,
min_cmd_delay:40,
reset_cmd1:0x80,
reset_cmd2:0xc0,
min_cmd_delay: 40,
reset_cmd1: 0x80,
reset_cmd2: 0xc0,
};
@@ -92,7 +113,143 @@ lcd_info __attribute__((aligned(8))) =
#define LED_DATA_REG lcd_info.lcd_cmd_reg_addr /* LASI & ASP only */
/* ptr to LCD/LED-specific function */
static void (*led_func_ptr) (unsigned char);
#define LED_HASLCD 1
#define LED_NOLCD 0
#ifdef CONFIG_PROC_FS
static int led_proc_read(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
char *out = page;
int len;
switch ((long)data)
{
case LED_NOLCD:
out += sprintf(out, "Heartbeat: %d\n", led_heartbeat);
out += sprintf(out, "Disk IO: %d\n", led_diskio);
out += sprintf(out, "LAN Rx/Tx: %d\n", led_lanrxtx);
break;
case LED_HASLCD:
out += sprintf(out, "%s\n", lcd_text);
break;
default:
*eof = 1;
return 0;
}
len = out - page - off;
if (len < count) {
*eof = 1;
if (len <= 0) return 0;
} else {
len = count;
}
*start = page + off;
return len;
}
static int led_proc_write(struct file *file, const char *buf,
unsigned long count, void *data)
{
char *cur, lbuf[count];
int d;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
memset(lbuf, 0, count);
copy_from_user(lbuf, buf, count);
cur = lbuf;
/* skip initial spaces */
while (*cur && isspace(*cur))
{
cur++;
}
switch ((long)data)
{
case LED_NOLCD:
d = *cur++ - '0';
if (d != 0 && d != 1) goto parse_error;
led_heartbeat = d;
if (*cur++ != ' ') goto parse_error;
d = *cur++ - '0';
if (d != 0 && d != 1) goto parse_error;
led_diskio = d;
if (*cur++ != ' ') goto parse_error;
d = *cur++ - '0';
if (d != 0 && d != 1) goto parse_error;
led_lanrxtx = d;
break;
case LED_HASLCD:
if (*cur == 0)
{
/* reset to default */
lcd_print("Linux " UTS_RELEASE);
}
else
{
/* chop off trailing \n.. if the user gives multiple
* \n then it's all their fault.. */
if (*cur && cur[strlen(cur)-1] == '\n')
cur[strlen(cur)-1] = 0;
lcd_print(cur);
}
break;
default:
return 0;
}
return count;
parse_error:
if ((long)data == LED_NOLCD)
printk(KERN_CRIT "Parse error: expect \"n n n\" (n == 0 or 1) for heartbeat,\ndisk io and lan tx/rx indicators\n");
return -EINVAL;
}
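/*
** Usage sketch (paths per led_create_procfs() below):
**
**	echo "1 0 1" > /proc/pdc/led	-- heartbeat on, disk io off, lan on
**	echo "hello" > /proc/pdc/lcd	-- show "hello" on the LCD
*/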
static int __init led_create_procfs(void)
{
struct proc_dir_entry *proc_pdc_root = NULL;
struct proc_dir_entry *ent;
if (led_type == -1) return -1;
proc_pdc_root = proc_mkdir("pdc", 0);
if (!proc_pdc_root) return -1;
proc_pdc_root->owner = THIS_MODULE;
ent = create_proc_entry("led", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
if (!ent) return -1;
ent->nlink = 1;
ent->data = (void *)LED_NOLCD; /* LED */
ent->read_proc = led_proc_read;
ent->write_proc = led_proc_write;
ent->owner = THIS_MODULE;
if (led_type == LED_HASLCD)
{
ent = create_proc_entry("lcd", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
if (!ent) return -1;
ent->nlink = 1;
ent->data = (void *)LED_HASLCD; /* LCD */
ent->read_proc = led_proc_read;
ent->write_proc = led_proc_write;
ent->owner = THIS_MODULE;
}
return 0;
}
#endif
/*
**
@@ -132,16 +289,14 @@ static void led_LASI_driver(unsigned char leds)
**
** led_LCD_driver()
**
** The logic of the LCD driver is that we write at every interrupt
** The logic of the LCD driver is that we write at every scheduled call
** only to one of the LCD_CMD_REG _or_ LCD_DATA_REG registers.
** That way we don't need to let this interrupt routine busywait
** the "min_cmd_delay", since idlewaiting in an interrupt routine is
** always a BAD IDEA!
** That way we don't need to let this tasklet busywait for the
** min_cmd_delay microseconds.
**
** TODO: check the value of "min_cmd_delay" against the value of HZ.
**
*/
static void led_LCD_driver(unsigned char leds)
{
static int last_index; /* 0:heartbeat, 1:disk, 2:lan_in, 3:lan_out */
@@ -149,8 +304,6 @@ static void led_LCD_driver(unsigned char leds)
struct lcd_block *block_ptr;
int value;
// leds = ~leds; /* needed ? */
switch (last_index) {
case 0: block_ptr = &lcd_info.heartbeat;
value = leds & LED_HEARTBEAT;
@@ -165,7 +318,6 @@ static void led_LCD_driver(unsigned char leds)
value = leds & LED_LAN_TX;
break;
default: /* should never happen: */
BUG();
return;
}
@@ -178,179 +330,295 @@ static void led_LCD_driver(unsigned char leds)
}
/* now update the vars for the next interrupt iteration */
if (++last_was_cmd == 2) {
if (++last_was_cmd == 2) { /* switch between cmd & data */
last_was_cmd = 0;
if (++last_index == 4)
last_index = 0;
last_index = 0; /* switch back to heartbeat index */
}
}
/*
**
** led_get_net_stats()
**
** calculate the TX & RX throughput on the network interfaces in
** the system for use in the LED code
**
** (analogous to dev_get_info() from net/core/dev.c)
**
*/
static unsigned long led_net_rx_counter, led_net_tx_counter;
static char currentleds; /* stores current value of the LEDs */
static void led_get_net_stats(int addvalue)
{
#ifdef CONFIG_NET
static unsigned long rx_total_last, tx_total_last;
unsigned long rx_total, tx_total;
struct net_device *dev;
struct net_device_stats *stats;
rx_total = tx_total = 0;
/* we are running as a tasklet, so locking dev_base
* for reading should be OK */
read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->get_stats) {
stats = dev->get_stats(dev);
rx_total += stats->rx_packets;
tx_total += stats->tx_packets;
}
}
read_unlock(&dev_base_lock);
rx_total -= rx_total_last;
tx_total -= tx_total_last;
if (rx_total)
led_net_rx_counter += CALC_ADD(rx_total, tx_total, addvalue);
if (tx_total)
led_net_tx_counter += CALC_ADD(tx_total, rx_total, addvalue);
rx_total_last += rx_total;
tx_total_last += tx_total;
#endif
}
static void (*led_func_ptr) (unsigned char); /* ptr to LCD/LED-specific function */
/*
** led_interrupt_func()
**
** is called at every timer interrupt from time.c,
** led_get_diskio_stats()
**
** calculate the disk-io throughput in the system
** (analogous to linux/fs/proc/proc_misc.c)
**
*/
static unsigned long led_diskio_counter;
static void led_get_diskio_stats(int addvalue)
{
static unsigned int diskio_total_last, diskio_max;
int major, disk, total;
total = 0;
for (major = 0; major < DK_MAX_MAJOR; major++) {
for (disk = 0; disk < DK_MAX_DISK; disk++)
total += kstat.dk_drive[major][disk];
}
total -= diskio_total_last;
if (total) {
if (total >= diskio_max) {
led_diskio_counter += addvalue;
diskio_max = total; /* new maximum value found */
} else
led_diskio_counter += CALC_ADD(total, diskio_max, addvalue);
}
diskio_total_last += total;
}
/*
** led_tasklet_func()
**
** is scheduled at every timer interrupt from time.c and
** updates the chassis LCD/LED
TODO:
- display load average (older machines like 715/64 have 4 "free" LEDs for that)
- optimizations
*/
#define HEARTBEAT_LEN (HZ/16)
static unsigned char currentleds; /* stores current value of the LEDs */
#define HEARTBEAT_LEN (HZ*6/100)
#define HEARTBEAT_2ND_RANGE_START (HZ*22/100)
#define HEARTBEAT_2ND_RANGE_END (HEARTBEAT_2ND_RANGE_START + HEARTBEAT_LEN)
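/*
** Worked example: with HZ == 100 this gives HEARTBEAT_LEN = 6 ticks
** (60ms); within each second the LED is lit for ticks 0..5 and again
** for ticks 22..27 -- two short pulses and a long pause, the "real
** heart" flashing implemented below.
*/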
void led_interrupt_func(void)
static void led_tasklet_func(unsigned long unused)
{
#ifndef DISABLE_LEDS
static int count;
static int lastleds = -1;
static int nr;
static unsigned int count, count_HZ;
static unsigned char lastleds;
/* exit, if not initialized */
/* exit if not initialized */
if (!led_func_ptr)
return;
/* increment the local counter */
if (count == (HZ-1))
count = 0;
else
count++;
/* increment the local counters */
++count;
if (++count_HZ == HZ)
count_HZ = 0;
/* calculate the Heartbeat */
if ((count % (HZ/2)) < HEARTBEAT_LEN)
if (led_heartbeat)
{
/* flash heartbeat-LED like a real heart (2 x short then a long delay) */
if (count_HZ<HEARTBEAT_LEN ||
(count_HZ>=HEARTBEAT_2ND_RANGE_START && count_HZ<HEARTBEAT_2ND_RANGE_END))
currentleds |= LED_HEARTBEAT;
else
currentleds &= ~LED_HEARTBEAT;
}
/* gather network and diskio statistics and flash LEDs respectively */
if (led_lanrxtx)
{
if ((count & 31) == 0)
led_get_net_stats(30);
if (led_net_rx_counter) {
led_net_rx_counter--;
currentleds |= LED_LAN_RCV;
}
else
currentleds &= ~LED_LAN_RCV;
/* roll LEDs 0..2 */
if (count == 0) {
if (nr++ >= 2)
nr = 0;
currentleds &= ~7;
currentleds |= (1 << nr);
if (led_net_tx_counter) {
led_net_tx_counter--;
currentleds |= LED_LAN_TX;
}
else
currentleds &= ~LED_LAN_TX;
}
/* now update the LEDs */
if (led_diskio)
{
/* avoid calculating the diskio stats in the same tick as the netio stats! */
if ((count & 31) == 15)
led_get_diskio_stats(30);
if (led_diskio_counter) {
led_diskio_counter--;
currentleds |= LED_DISK_IO;
}
else
currentleds &= ~LED_DISK_IO;
}
/* update the LCD/LEDs */
if (currentleds != lastleds) {
led_func_ptr(currentleds);
lastleds = currentleds;
}
#endif
}
/* main led tasklet struct (scheduled from time.c) */
DECLARE_TASKLET_DISABLED(led_tasklet, led_tasklet_func, 0);
/*
** led_halt()
**
** called by the reboot notifier chain at shutdown and stops all
** LED/LCD activities.
**
*/
static int led_halt(struct notifier_block *, unsigned long, void *);
static struct notifier_block led_notifier = {
notifier_call: led_halt,
};
static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
char *txt;
switch (event) {
case SYS_RESTART: txt = "SYSTEM RESTART";
break;
case SYS_HALT: txt = "SYSTEM HALT";
break;
case SYS_POWER_OFF: txt = "SYSTEM POWER OFF";
break;
default: return NOTIFY_DONE;
}
/* completely stop the LED/LCD tasklet */
tasklet_disable(&led_tasklet);
if (lcd_info.model == DISPLAY_MODEL_LCD)
lcd_print(txt);
else
if (led_func_ptr)
led_func_ptr(0xff); /* turn all LEDs ON */
unregister_reboot_notifier(&led_notifier);
return NOTIFY_OK;
}
/*
** register_led_driver()
**
** All information in lcd_info needs to be set up prior
** to calling this function.
** registers an external LED or LCD for use by this driver.
** Currently only LCD-, LASI- and ASP-style LCDs/LEDs are supported.
**
*/
static void __init register_led_driver(void)
int __init register_led_driver(int model, char *cmd_reg, char *data_reg)
{
#ifndef DISABLE_LEDS
static int initialized;
if (initialized || !data_reg)
return 1;
lcd_info.model = model; /* store the values */
LCD_CMD_REG = (cmd_reg == LED_CMD_REG_NONE) ? NULL : cmd_reg;
switch (lcd_info.model) {
case DISPLAY_MODEL_LCD:
printk(KERN_INFO "LCD display at (%p,%p)\n",
LCD_DATA_REG = data_reg;
printk(KERN_INFO "LCD display at %p,%p registered\n",
LCD_CMD_REG , LCD_DATA_REG);
led_func_ptr = led_LCD_driver;
lcd_print( "Linux " UTS_RELEASE );
led_type = LED_HASLCD;
break;
case DISPLAY_MODEL_LASI:
printk(KERN_INFO "LED display at %p\n",
LED_DATA_REG);
LED_DATA_REG = data_reg;
led_func_ptr = led_LASI_driver;
printk(KERN_INFO "LED display at %p registered\n", LED_DATA_REG);
led_type = LED_NOLCD;
break;
case DISPLAY_MODEL_OLD_ASP:
printk(KERN_INFO "LED (ASP-style) display at %p\n",
LED_DATA_REG);
LED_DATA_REG = data_reg;
led_func_ptr = led_ASP_driver;
printk(KERN_INFO "LED (ASP-style) display at %p registered\n",
LED_DATA_REG);
led_type = LED_NOLCD;
break;
default:
printk(KERN_ERR "%s: Wrong LCD/LED model %d !\n",
__FUNCTION__, lcd_info.model);
return;
return 1;
}
#endif
}
/*
* XXX - could this move to lasi.c ??
*/
/*
** lasi_led_init()
**
** lasi_led_init() is called from lasi.c with the base hpa
** of the lasi controller chip.
** Since Mirage and Electra machines use a different LED
** address register, we need to check for these machines
** explicitly.
*/
#ifdef CONFIG_GSC_LASI
void __init lasi_led_init(unsigned long lasi_hpa)
{
if (lcd_info.model != DISPLAY_MODEL_NONE ||
lasi_hpa == 0)
return;
printk("%s: CPU_HVERSION %x\n", __FUNCTION__, CPU_HVERSION);
/* Mirage and Electra machines need special offsets */
switch (CPU_HVERSION) {
case 0x60A: /* Mirage Jr (715/64) */
case 0x60B: /* Mirage 100 */
case 0x60C: /* Mirage 100+ */
case 0x60D: /* Electra 100 */
case 0x60E: /* Electra 120 */
LED_DATA_REG = (char *) (lasi_hpa - 0x00020000);
break;
default:
LED_DATA_REG = (char *) (lasi_hpa + 0x0000C000);
break;
} /* switch() */
lcd_info.model = DISPLAY_MODEL_LASI;
register_led_driver();
}
#endif
/* mark the LCD/LED driver now as initialized and
* register to the reboot notifier chain */
initialized++;
register_reboot_notifier(&led_notifier);
/*
** asp_led_init()
**
** asp_led_init() is called from asp.c with the ptr
** to the LED display.
*/
#ifdef CONFIG_GSC_LASI
void __init asp_led_init(unsigned long led_ptr)
{
if (lcd_info.model != DISPLAY_MODEL_NONE ||
led_ptr == 0)
return;
/* start the led tasklet for the first time */
tasklet_enable(&led_tasklet);
lcd_info.model = DISPLAY_MODEL_OLD_ASP;
LED_DATA_REG = (char *) led_ptr;
register_led_driver();
return 0;
}
#endif
/*
** register_led_regions()
**
** Simple function, which registers the LCD/LED regions for /procfs.
** register_led_regions() registers the LCD/LED regions for /procfs.
** At bootup - where the initialisation of the LCD/LED normally happens -
** not all internal structures of request_region() are properly set up,
** so we delay the registration until busdevice.c is executed.
** so we delay the led registration until after busdevices_init()
** has been executed.
**
*/
@@ -358,17 +626,57 @@ void __init register_led_regions(void)
{
switch (lcd_info.model) {
case DISPLAY_MODEL_LCD:
request_region((unsigned long)LCD_CMD_REG, 1, "lcd_cmd");
request_region((unsigned long)LCD_DATA_REG, 1, "lcd_data");
request_mem_region((unsigned long)LCD_CMD_REG, 1, "lcd_cmd");
request_mem_region((unsigned long)LCD_DATA_REG, 1, "lcd_data");
break;
case DISPLAY_MODEL_LASI:
case DISPLAY_MODEL_OLD_ASP:
request_region((unsigned long)LED_DATA_REG, 1, "led_data");
request_mem_region((unsigned long)LED_DATA_REG, 1, "led_data");
break;
}
}
/*
**
** lcd_print()
**
** Displays the given string on the LCD-Display of newer machines.
** lcd_print() disables the timer-based led tasklet during its
** execution and re-enables it afterwards.
**
*/
int lcd_print( char *str )
{
int i;
if (!led_func_ptr || lcd_info.model != DISPLAY_MODEL_LCD)
return 0;
/* temporarily disable the led tasklet */
tasklet_disable(&led_tasklet);
/* copy display string to buffer for procfs */
strncpy(lcd_text, str, sizeof(lcd_text)-1);
/* Set LCD Cursor to 1st character */
gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
udelay(lcd_info.min_cmd_delay);
/* Print the string */
for (i=0; i < lcd_info.lcd_width; i++) {
if (str && *str)
gsc_writeb(*str++, LCD_DATA_REG);
else
gsc_writeb(' ', LCD_DATA_REG);
udelay(lcd_info.min_cmd_delay);
}
/* re-enable the led tasklet */
tasklet_enable(&led_tasklet);
return lcd_info.lcd_width;
}
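/*
** Usage note: lcd_print("Linux " UTS_RELEASE) pads or truncates the
** string to lcd_info.lcd_width characters and returns that width;
** each register write is paced by udelay(min_cmd_delay).
*/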
/*
** led_init()
@@ -384,10 +692,8 @@ void __init register_led_regions(void)
int __init led_init(void)
{
#ifndef DISABLE_LEDS
long pdc_result[32];
printk("%s: CPU_HVERSION %x\n", __FUNCTION__, CPU_HVERSION);
struct pdc_chassis_info chassis_info;
int ret;
/* Work around the buggy PDC of KittyHawk-machines */
switch (CPU_HVERSION) {
@@ -396,56 +702,74 @@ int __init led_init(void)
case 0x582: /* KittyHawk DC3 100 (K400) */
case 0x583: /* KittyHawk DC3 120 (K410) */
case 0x58B: /* KittyHawk DC2 100 (K200) */
printk("%s: KittyHawk-Machine found !!\n", __FUNCTION__);
printk(KERN_INFO "%s: KittyHawk-Machine (hversion 0x%x) found, "
"LED detection skipped.\n", __FILE__, CPU_HVERSION);
goto found; /* use the preinitialized values of lcd_info */
default:
break;
}
/* initialize pdc_result, so we can check the return values of pdc_chassis_info() */
pdc_result[0] = pdc_result[1] = 0;
if (pdc_chassis_info(&pdc_result, &lcd_info, sizeof(lcd_info)) == PDC_OK) {
printk("%s: chassis info: model %d, ret0=%d, ret1=%d\n",
__FUNCTION__, lcd_info.model, pdc_result[0], pdc_result[1]);
/* initialize the struct, so that we can check for valid return values */
lcd_info.model = DISPLAY_MODEL_NONE;
chassis_info.actcnt = chassis_info.maxcnt = 0;
if ((ret = pdc_chassis_info(&chassis_info, &lcd_info, sizeof(lcd_info))) == PDC_OK) {
DPRINTK((KERN_INFO "%s: chassis info: model=%d (%s), "
"lcd_width=%d, cmd_delay=%u,\n"
"%s: sizecnt=%d, actcnt=%ld, maxcnt=%ld\n",
__FILE__, lcd_info.model,
(lcd_info.model==DISPLAY_MODEL_LCD) ? "LCD" :
(lcd_info.model==DISPLAY_MODEL_LASI) ? "LED" : "unknown",
lcd_info.lcd_width, lcd_info.min_cmd_delay,
__FILE__, sizeof(lcd_info),
chassis_info.actcnt, chassis_info.maxcnt));
DPRINTK((KERN_INFO "%s: cmd=%p, data=%p, reset1=%x, reset2=%x, act_enable=%d\n",
__FILE__, lcd_info.lcd_cmd_reg_addr,
lcd_info.lcd_data_reg_addr, lcd_info.reset_cmd1,
lcd_info.reset_cmd2, lcd_info.act_enable ));
/* check the results. Some machines have a buggy PDC */
if (pdc_result[0] <= 0 || pdc_result[0] != pdc_result[1])
if (chassis_info.actcnt <= 0 || chassis_info.actcnt != chassis_info.maxcnt)
goto not_found;
switch (lcd_info.model) {
case DISPLAY_MODEL_LCD: /* LCD display */
if (pdc_result[0] != sizeof(struct pdc_chassis_lcd_info_ret_block)
&& pdc_result[0] != sizeof(struct pdc_chassis_lcd_info_ret_block) - 1)
if (chassis_info.actcnt <
offsetof(struct pdc_chassis_lcd_info_ret_block, _pad)-1)
goto not_found;
printk("%s: min_cmd_delay = %d uS\n",
__FUNCTION__, lcd_info.min_cmd_delay);
if (!lcd_info.act_enable) {
DPRINTK((KERN_INFO "PDC prohibited usage of the LCD.\n"));
goto not_found;
}
break;
case DISPLAY_MODEL_NONE: /* no LED or LCD available */
printk(KERN_INFO "PDC reported no LCD or LED.\n");
goto not_found;
case DISPLAY_MODEL_LASI: /* Lasi style 8 bit LED display */
if (pdc_result[0] != 8 && pdc_result[0] != 32)
if (chassis_info.actcnt != 8 && chassis_info.actcnt != 32)
goto not_found;
break;
default:
printk(KERN_WARNING "Unknown LCD/LED model %d\n",
printk(KERN_WARNING "PDC reported unknown LCD/LED model %d\n",
lcd_info.model);
goto not_found;
} /* switch() */
found:
/* register the LCD/LED driver */
register_led_driver();
register_led_driver(lcd_info.model, LCD_CMD_REG, LCD_DATA_REG);
return 0;
} /* if() */
} else { /* if() */
DPRINTK((KERN_INFO "pdc_chassis_info call failed with retval = %d\n", ret));
}
not_found:
lcd_info.model = DISPLAY_MODEL_NONE;
return 1;
#endif
}
#ifdef CONFIG_PROC_FS
module_init(led_create_procfs)
#endif
/*
* linux/arch/parisc/kernel/power.c
* HP PARISC soft power switch support driver
*
* Copyright (c) 2001-2002 Helge Deller <deller@gmx.de>
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*
* HINT:
* Support of the soft power switch button may be enabled or disabled at
* runtime through the "/proc/sys/kernel/power" procfs entry.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/pdc.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/led.h>
#include <asm/uaccess.h>
#ifdef DEBUG
# define DPRINTK(x) printk x
#else
# define DPRINTK(x) do { } while (0)
#endif
/* filename in /proc which can be used to enable/disable the power switch */
#define SYSCTL_FILENAME "sys/kernel/power"
#define DIAG_CODE(code) (0x14000000 + ((code)<<5))
/* this will go to processor.h or any other place... */
/* taken from PCXL ERS page 82 */
#define MFCPU_X(rDiagReg, t_ch, t_th, code) \
(DIAG_CODE(code) + ((rDiagReg)<<21) + ((t_ch)<<16) + ((t_th)<<0) )
#define MTCPU(dr, gr) MFCPU_X(dr, gr, 0, 0x12) /* move value of gr to dr[dr] */
#define MFCPU_C(dr, gr) MFCPU_X(dr, gr, 0, 0x30) /* for dr0 and dr8 only ! */
#define MFCPU_T(dr, gr) MFCPU_X(dr, 0, gr, 0xa0) /* all dr except dr0 and dr8 */
#define __getDIAG(dr) ( { \
register unsigned long __res asm("r28");\
__asm__ __volatile__ ( \
".word %1\n nop\n" : "=&r" (__res) : "i" (MFCPU_T(dr,28)) \
); \
__res; \
} )
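/*
** Worked encoding example (follows from the macros above):
** __getDIAG(25) embeds MFCPU_T(25, 28), i.e. the word
**
**	DIAG_CODE(0xa0) + (25 << 21) + 28 = 0x1720141c
**
** which moves diagnose register 25 into %r28; the asm then returns
** whatever the CPU left there.
*/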
static void deferred_poweroff(void *dummy)
{
extern int cad_pid; /* from kernel/sys.c */
if (kill_proc(cad_pid, SIGINT, 1)) {
/* just in case killing init process failed */
machine_power_off();
}
}
/*
* This function gets called from interrupt context.
 * Since we run in interrupt context we can't signal the init process
 * directly, so the actual poweroff is deferred via schedule_work().
*/
static DECLARE_WORK(poweroff_work, deferred_poweroff, NULL);
static void poweroff(void)
{
static int powering_off;
if (powering_off)
return;
powering_off++;
schedule_work(&poweroff_work);
}
/* local time-counter for shutdown */
static int shutdown_timer;
/* check, give feedback and start shutdown after one second */
static void process_shutdown(void)
{
if (shutdown_timer == 0)
DPRINTK((KERN_INFO "Shutdown requested...\n"));
shutdown_timer++;
/* wait until the button has been pressed for 1 second */
if (shutdown_timer == HZ) {
static char msg[] = "Shutting down...";
DPRINTK((KERN_INFO "%s\n", msg));
#ifdef CONFIG_CHASSIS_LCD_LED
lcd_print(msg);
#endif
poweroff();
}
}
/* main power switch tasklet struct (scheduled from time.c) */
DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0);
/* soft power switch enabled/disabled */
#ifdef CONFIG_PROC_FS
static int pwrsw_enabled = 1;
#else
#define pwrsw_enabled (1)
#endif
/*
* On gecko style machines (e.g. 712/xx and 715/xx)
* the power switch status is stored in Bit 0 ("the highest bit")
* of CPU diagnose register 25.
*
*/
static void gecko_tasklet_func(unsigned long unused)
{
if (!pwrsw_enabled)
return;
if (__getDIAG(25) & 0x80000000) {
/* power switch button not pressed or released again */
/* Warning: Some machines never reset this DIAG flag! */
shutdown_timer = 0;
} else {
process_shutdown();
}
}
/*
* Check the power switch status which is read from the
* real I/O location at soft_power_reg.
* Bit 31 ("the lowest bit) is the status of the power switch.
*/
static void polling_tasklet_func(unsigned long soft_power_reg)
{
unsigned long current_status;
if (!pwrsw_enabled)
return;
current_status = gsc_readl(soft_power_reg);
if (current_status & 0x1) {
/* power switch button not pressed */
shutdown_timer = 0;
} else {
process_shutdown();
}
}
/*
* powerfail interruption handler (irq IRQ_FROM_REGION(CPU_IRQ_REGION)+2)
*/
#if 0
static void powerfail_interrupt(int code, void *x, struct pt_regs *regs)
{
printk(KERN_CRIT "POWERFAIL INTERRUPTION !\n");
poweroff();
}
#endif
/*
* /proc filesystem support
*/
#ifdef CONFIG_SYSCTL
static int power_proc_read(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
char *out = page;
int len;
out += sprintf(out, "Software power switch support: ");
out += sprintf(out, pwrsw_enabled ? "enabled (1)" : "disabled (0)" );
out += sprintf(out, "\n");
len = out - page - off;
if (len < count) {
*eof = 1;
if (len <= 0) return 0;
} else {
len = count;
}
*start = page + off;
return len;
}
static int power_proc_write(struct file *file, const char *buf,
unsigned long count, void *data)
{
char *cur, lbuf[count];
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
memset(lbuf, 0, count);
copy_from_user(lbuf, buf, count);
cur = lbuf;
/* skip initial spaces */
while (*cur && isspace(*cur))
cur++;
switch (*cur) {
case '0': pwrsw_enabled = 0;
break;
case '1': pwrsw_enabled = 1;
break;
default: printk(KERN_CRIT "/proc/" SYSCTL_FILENAME
": Parse error: only '0' or '1' allowed!\n");
return -EINVAL;
} /* switch() */
return count;
}
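/* Usage sketch (editor's note): with this proc support compiled in,
 * the switch can be toggled from userspace through the proc file named
 * by SYSCTL_FILENAME (defined elsewhere in this driver):
 *
 *   echo 0 > /proc/<SYSCTL_FILENAME>     disable the soft power switch
 *   echo 1 > /proc/<SYSCTL_FILENAME>     enable it again
 */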
static struct proc_dir_entry *ent;
static void __init power_create_procfs(void)
{
if (!power_tasklet.func)
return;
ent = create_proc_entry(SYSCTL_FILENAME, S_IFREG|S_IRUGO|S_IWUSR, 0);
if (!ent) return;
ent->nlink = 1;
ent->read_proc = power_proc_read;
ent->write_proc = power_proc_write;
ent->owner = THIS_MODULE;
}
static void __exit power_remove_procfs(void)
{
remove_proc_entry(SYSCTL_FILENAME, NULL);
}
#else
#define power_create_procfs() do { } while (0)
#define power_remove_procfs() do { } while (0)
#endif /* CONFIG_SYSCTL */
/* parisc_panic_event() is called by the panic handler.
* As soon as a panic occurs, our tasklets above will not be
* executed any longer. This function then re-enables the
* soft-power switch and allows the user to switch off the system
*/
static int parisc_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
/* re-enable the soft-power switch */
pdc_soft_power_button(0);
return NOTIFY_DONE;
}
static struct notifier_block parisc_panic_block = {
notifier_call: parisc_panic_event,
priority: INT_MAX,
};
static int __init power_init(void)
{
unsigned long ret;
unsigned long soft_power_reg = 0;
#if 0
request_irq( IRQ_FROM_REGION(CPU_IRQ_REGION)+2, &powerfail_interrupt,
0, "powerfail", NULL);
#endif
/* enable the soft power switch if possible */
ret = pdc_soft_power_info(&soft_power_reg);
if (ret == PDC_OK)
ret = pdc_soft_power_button(1);
if (ret != PDC_OK)
soft_power_reg = -1UL;
switch (soft_power_reg) {
case 0: printk(KERN_INFO "Gecko-style soft power switch enabled.\n");
power_tasklet.func = gecko_tasklet_func;
break;
case -1UL: printk(KERN_INFO "Soft power switch support not available.\n");
return -ENODEV;
default: printk(KERN_INFO "Soft power switch enabled, polling @ 0x%08lx.\n",
soft_power_reg);
power_tasklet.data = soft_power_reg;
power_tasklet.func = polling_tasklet_func;
}
/* Register a call for panic conditions. */
notifier_chain_register(&panic_notifier_list, &parisc_panic_block);
power_create_procfs();
tasklet_enable(&power_tasklet);
return 0;
}
static void __exit power_exit(void)
{
if (!power_tasklet.func)
return;
tasklet_disable(&power_tasklet);
notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block);
power_remove_procfs();
power_tasklet.func = NULL;
pdc_soft_power_button(0);
}
module_init(power_init);
module_exit(power_exit);
MODULE_AUTHOR("Helge Deller");
MODULE_DESCRIPTION("Soft power switch driver");
MODULE_LICENSE("Dual BSD/GPL");
EXPORT_NO_SYMBOLS;
@@ -15,7 +15,6 @@
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: Multi-IOC support missing - depends on hp_device data
** FIXME: add DMA hint support programming in both sba and lba modules.
*/
@@ -28,7 +27,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#define PCI_DEBUG /* for ASSERT */
#undef PCI_DEBUG /* for ASSERT */
#include <linux/pci.h>
#undef PCI_DEBUG
@@ -36,19 +35,18 @@
#include <asm/io.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
#include <asm/hardware.h> /* for register_driver() stuff */
#include <asm/gsc.h> /* FIXME: for gsc_read/gsc_write */
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <linux/proc_fs.h>
#include <asm/runway.h> /* for proc_runway_root */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#define MODULE_NAME "SBA"
/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrafice to the computer gods. :^)
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
@@ -56,12 +54,9 @@
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP
#if 1
#define SBA_INLINE
#else
#define SBA_INLINE __inline__
#endif
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
@@ -95,11 +90,7 @@
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#if 0
#define DELAYED_RESOURCE_CNT 16
#else
#undef DELAYED_RESOURCE_CNT
#endif
#define DEFAULT_DMA_HINT_REG 0
@@ -109,59 +100,33 @@
#define IKE_MERCED_PORT 0x803
#define IKE_ROPES_PORT 0x781
int sba_driver_callback(struct hp_device *, struct pa_iodc_driver *);
static struct pa_iodc_driver sba_drivers_for[] = {
/* FIXME: why is SVERSION checked? */
{HPHW_IOA, ASTRO_RUNWAY_PORT, 0x0, 0xb, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "I/O MMU", (void *) sba_driver_callback},
{HPHW_BCPORT, ASTRO_ROPES_PORT, 0x0, 0xb, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "I/O MMU", (void *) sba_driver_callback},
#if 0
/* FIXME : N-class! Use a different "callback"? */
{HPHW_BCPORT, IKE_MERCED_PORT, 0x0, 0xb, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "I/O MMU", (void *) sba_driver_callback},
{HPHW_BCPORT, IKE_ROPES_PORT, 0x0, 0xb, 0, 0x10,
DRIVER_CHECK_HVERSION +
DRIVER_CHECK_SVERSION + DRIVER_CHECK_HWTYPE,
MODULE_NAME, "I/O MMU", (void *) sba_driver_callback},
#endif
{0,0,0,0,0,0,
0,
(char *) NULL, (char *) NULL, (void *) NULL }
};
#define REO_MERCED_PORT 0x804
#define REO_ROPES_PORT 0x782
#define REOG_MERCED_PORT 0x805
#define REOG_ROPES_PORT 0x783
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define IS_ASTRO(id) ( \
(((id)->hw_type == HPHW_IOA) && ((id)->hversion == ASTRO_RUNWAY_PORT)) || \
(((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == ASTRO_ROPES_PORT)) \
)
#define IS_ASTRO(id) \
(((id)->hversion == ASTRO_RUNWAY_PORT) || ((id)->hversion == ASTRO_ROPES_PORT))
#define CONFIG_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define IS_IKE(id) \
(((id)->hversion == IKE_MERCED_PORT) || ((id)->hversion == IKE_ROPES_PORT))
#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define ASTRO_IOC_OFFSET 0x20000
/* Ike's IOC's occupy functions 2 and 3 (not 0 and 1) */
#define IKE_IOC_OFFSET(p) ((p+2)*CONFIG_FUNC_SIZE)
#define IKE_IOC_OFFSET(p) ((p+2)*SBA_FUNC_SIZE)
#define IOC_CTRL 0x8 /* IOC_CTRL offset */
#define IOC_CTRL_TE (0x1 << 0) /* TOC Enable */
#define IOC_CTRL_RM (0x1 << 8) /* Real Mode */
#define IOC_CTRL_NC (0x1 << 9) /* Non Coherent Mode */
#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM (1 << 8) /* Real Mode */
#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
#define MAX_IOC 2 /* per Ike. Astro only has 1 */
@@ -233,18 +198,22 @@ static struct pa_iodc_driver sba_drivers_for[] = {
struct ioc {
char *ioc_hpa; /* I/O MMU base address */
unsigned long ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long *res_hint; /* next available IOVP - circular search */
unsigned long *res_hint; /* next avail IOVP - circular search */
spinlock_t res_lock;
unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int res_bitshift; /* from the LEFT! */
unsigned int res_size; /* size of resource map in bytes */
unsigned int hint_shift_pdir;
spinlock_t res_lock;
unsigned long hint_mask_pdir; /* bits used for DMA hints */
#ifdef DELAYED_RESOURCE_CNT
dma_addr_t res_delay[DELAYED_RESOURCE_CNT];
#if DELAYED_RESOURCE_CNT > 0
int saved_cnt;
struct sba_dma_pair {
dma_addr_t iova;
size_t size;
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef CONFIG_PROC_FS
@@ -269,9 +238,11 @@ struct ioc {
};
struct sba_device {
struct sba_device *next; /* list of LBA's in system */
struct hp_device *iodc; /* data about dev from firmware */
char *sba_hpa; /* base address */
struct sba_device *next; /* list of SBA's in system */
struct parisc_device *dev; /* dev found in bus walk */
struct parisc_device_id *iodc; /* data about dev from firmware */
const char *name;
unsigned long sba_hpa; /* base address */
spinlock_t sba_lock;
unsigned int flags; /* state/functionality enabled */
unsigned int hw_rev; /* HW revision of chip */
@@ -282,10 +253,17 @@ struct sba_device {
static struct sba_device *sba_list;
static int sba_count;
static unsigned long ioc_needs_fdc = 0;
/* Ratio of Host MEM to IOV Space size */
static unsigned long sba_mem_ratio = 4;
static unsigned long sba_mem_ratio = 8;
/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;
/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))
@@ -299,73 +277,75 @@ static unsigned long sba_mem_ratio = 4;
**
** BE WARNED: register writes are posted.
** (ie follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_U8(addr) gsc_readb(addr)
#define READ_U16(addr) gsc_readw((u16 *) (addr))
#define READ_U32(addr) gsc_readl((u32 *) (addr))
#define WRITE_U8(value, addr) gsc_writeb(value, addr)
#define WRITE_U16(value, addr) gsc_writew(value, (u16 *) (addr))
#define WRITE_U32(value, addr) gsc_writel(value, (u32 *) (addr))
#define READ_REG8(addr) gsc_readb(addr)
#define READ_REG16(addr) le16_to_cpu(gsc_readw((u16 *) (addr)))
#define READ_REG32(addr) le32_to_cpu(gsc_readl((u32 *) (addr)))
#define READ_REG64(addr) le64_to_cpu(gsc_readq((u64 *) (addr)))
#define WRITE_REG8(value, addr) gsc_writeb(value, addr)
#define WRITE_REG16(value, addr) gsc_writew(cpu_to_le16(value), (u16 *) (addr))
#define WRITE_REG32(value, addr) gsc_writel(cpu_to_le32(value), (u32 *) (addr))
#define WRITE_REG64(value, addr) gsc_writeq(cpu_to_le64(value), (u64 *) (addr))
#define READ_REG32(addr) le32_to_cpu(__raw_readl(addr))
#define READ_REG64(addr) le64_to_cpu(__raw_readq(addr))
#define WRITE_REG32(val, addr) __raw_writel(cpu_to_le32(val), addr)
#define WRITE_REG64(val, addr) __raw_writeq(cpu_to_le64(val), addr)
#ifdef __LP64__
#define READ_REG(addr) READ_REG64(addr)
#define WRITE_REG(value, addr) WRITE_REG64(value, addr)
#else
#define READ_REG(addr) READ_REG32(addr)
#define WRITE_REG(value, addr) WRITE_REG32(value, addr)
#endif
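/* Usage sketch (editor's note, restating the posted-write warning
 * above): a write which must reach the hardware before the CPU moves
 * on is chased with a read of the same register, exactly as
 * sba_hw_init() below does with the rope control registers:
 *
 *   WRITE_REG(HF_ENABLE, ioc_hpa + ROPE7_CTL);
 *   READ_REG(ioc_hpa + ROPE7_CTL);      <- flushes the posted write
 */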
#ifdef DEBUG_SBA_INIT
/* NOTE: When __LP64__ isn't defined, READ_REG64() is two 32-bit reads */
/**
* sba_dump_ranges - debugging only - print ranges assigned to this IOA
* @hpa: base address of the sba
*
* Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
* IO Adapter (aka Bus Converter).
*/
static void
sba_dump_ranges(char *hpa)
sba_dump_ranges(unsigned long hpa)
{
printk("SBA at 0x%p\n", hpa);
printk("IOS_DIST_BASE : %08x %08x\n",
READ_REG32(hpa+IOS_DIST_BASE+4),
READ_REG32(hpa+IOS_DIST_BASE));
printk("IOS_DIST_MASK : %08x %08x\n",
READ_REG32(hpa+IOS_DIST_MASK+4),
READ_REG32(hpa+IOS_DIST_MASK));
printk("IOS_DIST_ROUTE : %08x %08x\n",
READ_REG32(hpa+IOS_DIST_ROUTE+4),
READ_REG32(hpa+IOS_DIST_ROUTE));
printk("\n");
printk("IOS_DIRECT_BASE : %08x %08x\n",
READ_REG32(hpa+IOS_DIRECT_BASE+4),
READ_REG32(hpa+IOS_DIRECT_BASE));
printk("IOS_DIRECT_MASK : %08x %08x\n",
READ_REG32(hpa+IOS_DIRECT_MASK+4),
READ_REG32(hpa+IOS_DIRECT_MASK));
printk("IOS_DIRECT_ROUTE: %08x %08x\n",
READ_REG32(hpa+IOS_DIRECT_ROUTE+4),
READ_REG32(hpa+IOS_DIRECT_ROUTE));
DBG_INIT("SBA at 0x%lx\n", hpa);
DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
DBG_INIT("\n");
DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}
/**
* sba_dump_tlb - debugging only - print IOMMU operating parameters
* @hpa: base address of the IOMMU
*
* Print the size/location of the IO MMU PDIR.
*/
static void
sba_dump_tlb(char *hpa)
sba_dump_tlb(unsigned long hpa)
{
printk("IO TLB at 0x%p\n", hpa);
printk("IOC_IBASE : %08x %08x\n",
READ_REG32(hpa+IOC_IBASE+4),
READ_REG32(hpa+IOC_IBASE));
printk("IOC_IMASK : %08x %08x\n",
READ_REG32(hpa+IOC_IMASK+4),
READ_REG32(hpa+IOC_IMASK));
printk("IOC_TCNFG : %08x %08x\n",
READ_REG32(hpa+IOC_TCNFG+4),
READ_REG32(hpa+IOC_TCNFG));
printk("IOC_PDIR_BASE: %08x %08x\n",
READ_REG32(hpa+IOC_PDIR_BASE+4),
READ_REG32(hpa+IOC_PDIR_BASE));
printk("\n");
DBG_INIT("IO TLB at 0x%lx\n", hpa);
DBG_INIT("IOC_IBASE : %Lx\n", READ_REG64(hpa+IOC_IBASE));
DBG_INIT("IOC_IMASK : %Lx\n", READ_REG64(hpa+IOC_IMASK));
DBG_INIT("IOC_TCNFG : %Lx\n", READ_REG64(hpa+IOC_TCNFG));
DBG_INIT("IOC_PDIR_BASE: %Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
DBG_INIT("\n");
}
#endif
#ifdef ASSERT_PDIR_SANITY
/**
* sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
* @pide: pdir index.
*
* Print one entry of the IO MMU PDIR in human readable form.
*/
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
@@ -374,24 +354,30 @@ sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
uint rcnt;
printk("SBA: %s rp %p bit %d rval 0x%lx\n",
printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
msg,
rptr, pide & (BITS_PER_LONG - 1), *rptr);
rcnt = 0;
while (rcnt < BITS_PER_LONG) {
printk("%s %2d %p %016Lx\n",
printk(KERN_DEBUG "%s %2d %p %016Lx\n",
(rcnt == (pide & (BITS_PER_LONG - 1)))
? " -->" : " ",
rcnt, ptr, *ptr );
rcnt++;
ptr++;
}
printk(msg);
printk(KERN_DEBUG "%s", msg);
}
/* Verify the resource map and pdir state is consistent */
/**
* sba_check_pdir - debugging only - consistency checker
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
*
* Verify the resource map and pdir state is consistent
*/
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
@@ -428,15 +414,23 @@ sba_check_pdir(struct ioc *ioc, char *msg)
}
/**
* sba_dump_sg - debugging only - print Scatter-Gather list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: head of the SG list
* @nents: number of entries in SG list
*
* print the SG list so we can verify it's correct by hand.
*/
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
while (nents-- > 0) {
printk(" %d : %08lx/%05x %p/%05x\n",
printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
nents,
(unsigned long) sg_dma_address(startsg),
sg_dma_len(startsg),
startsg->address, startsg->length);
sg_virt_addr(startsg), startsg->length);
startsg++;
}
}
@@ -445,25 +439,6 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
/*
** One time initialization to let the world know the LBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init
sba_init(void)
{
sba_list = (struct sba_device *) NULL;
sba_count = 0;
#ifdef DEBUG_SBA_INIT
sba_dump_ranges((char *) 0xFED00000L);
#endif
register_driver(sba_drivers_for);
}
/**************************************************************
*
@@ -489,13 +464,15 @@ sba_init(void)
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
/*
** Perf optimizations:
** o search for log2(size) bits at a time.
**
** Search should use register width as "stride" to search the res_map.
*/
/**
* sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @bits_wanted: number of entries we need.
*
* Find consecutive free bits in resource bitmap.
* Each bit represents one entry in the IO Pdir.
* Cool perf optimization: search for log2(size) bits at a time.
*/
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
@@ -512,7 +489,6 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
*res_ptr = RESMAP_MASK(bits_wanted);
pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
pide <<= 3; /* convert to bit address */
ASSERT(0 != pide);
break;
}
}
@@ -536,7 +512,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
}
mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
DBG_RES("sba_search_bitmap() o %ld %p", o, res_ptr);
DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
while(res_ptr < res_end)
{
DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
@@ -546,7 +522,6 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
pide <<= 3; /* convert to bit address */
pide += bitshiftcnt;
ASSERT(0 != pide);
break;
}
mask >>= o;
@@ -562,11 +537,24 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
}
/* wrapped ? */
ioc->res_hint = (res_end == res_ptr) ? (unsigned long *) ioc->res_map : res_ptr;
if (res_end <= res_ptr) {
ioc->res_hint = (unsigned long *) ioc->res_map;
ioc->res_bitshift = 0;
} else {
ioc->res_hint = res_ptr;
}
return (pide);
}
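/* Note (editor's illustration): res_map is a byte array in which every
 * byte tracks 8 IO Pdir entries, so a res_ptr sitting N bytes into the
 * map corresponds to PDIR index N * 8 -- hence the "pide <<= 3"
 * byte-address to bit-address conversions above.
 */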
/**
* sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @size: number of bytes to create a mapping for
*
* Given a size, find consecutive unmarked bits and then mark them in the
* resource bit map.
*/
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
@@ -577,8 +565,8 @@ sba_alloc_range(struct ioc *ioc, size_t size)
unsigned long pide;
ASSERT(pages_needed);
ASSERT((pages_needed * IOVP_SIZE) < DMA_CHUNK_SIZE);
ASSERT(pages_needed < BITS_PER_LONG);
ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
ASSERT(pages_needed <= BITS_PER_LONG);
ASSERT(0 == (size & ~IOVP_MASK));
/*
@@ -590,7 +578,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
if (pide >= (ioc->res_size << 3)) {
pide = sba_search_bitmap(ioc, pages_needed);
if (pide >= (ioc->res_size << 3))
panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", ioc->ioc_hpa);
panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa);
}
#ifdef ASSERT_PDIR_SANITY
@@ -600,8 +588,8 @@ sba_alloc_range(struct ioc *ioc, size_t size)
}
#endif
DBG_RES("sba_alloc_range(%x) %d -> %lx hint %x/%x\n",
size, pages_needed, pide,
DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
__FUNCTION__, size, pages_needed, pide,
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift );
@@ -622,9 +610,14 @@ sba_alloc_range(struct ioc *ioc, size_t size)
}
/*
** clear bits in the ioc's resource map
*/
/**
* sba_free_range - unmark bits in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO virtual address which was previously allocated.
* @size: number of bytes to create a mapping for
*
* clear bits in the ioc's resource map
*/
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
@@ -638,8 +631,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
DBG_RES("sba_free_range( ,%x,%x) %x/%lx %x %p %lx\n",
(uint) iova, size,
DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
__FUNCTION__, (uint) iova, size,
bits_not_wanted, m, pide, res_ptr, *res_ptr);
#ifdef CONFIG_PROC_FS
@@ -648,8 +641,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
ASSERT(m != 0);
ASSERT(bits_not_wanted);
ASSERT((bits_not_wanted * IOVP_SIZE) < DMA_CHUNK_SIZE);
ASSERT(bits_not_wanted < BITS_PER_LONG);
ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE);
ASSERT(bits_not_wanted <= BITS_PER_LONG);
ASSERT((*res_ptr & m) == m); /* verify same bits are set */
*res_ptr &= ~m;
}
@@ -667,31 +660,37 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
typedef unsigned long space_t;
#define KERNEL_SPACE 0
/*
* SBA Mapping Routine
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1)
* sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
* pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
* shown below (MSB == bit 0):
*
* 0 19 51 55 63
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[43:12] | U | VI |
* +-+---------------------+----------------------------------+----+--------+
*
* V == Valid Bit
* U == Unused
* PPN == Physical Page Number
* VI == Virtual Index (aka Coherent Index)
*
* The physical address fields are filled with the results of the LPA
* instruction. The virtual index field is filled with the results of
* of the LCI (Load Coherence Index) instruction. The 8 bits used for
* the virtual index are bits 12:19 of the value returned by LCI.
*
* We need to pre-swap the bytes since PCX-W is Big Endian.
*/
/**
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @sid: process Space ID
* @vba: Virtual CPU address of buffer to map
*
* SBA Mapping Routine
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1)
* sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
* pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
* shown below (MSB == bit 0):
*
* 0 19 51 55 63
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[43:12] | U | VI |
* +-+---------------------+----------------------------------+----+--------+
*
* V == Valid Bit
* U == Unused
* PPN == Physical Page Number
* VI == Virtual Index (aka Coherent Index)
*
* The physical address fields are filled with the results of the LPA
* instruction. The virtual index field is filled with the results of
* the LCI (Load Coherence Index) instruction. The 8 bits used for
* the virtual index are bits 12:19 of the value returned by LCI.
*
* We need to pre-swap the bytes since PCX-W is Big Endian.
*/
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba)
@@ -699,9 +698,11 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba)
u64 pa; /* physical address */
register unsigned ci; /* coherent index */
/* We currently only support kernel addresses */
ASSERT(sid == 0);
ASSERT(((unsigned long) vba & 0xc0000000UL) == 0xc0000000UL);
/* We currently only support kernel addresses.
* fdc instr below will need to reload sr1 with KERNEL_SPACE
* once we try to support direct DMA to user space.
*/
ASSERT(sid == KERNEL_SPACE);
pa = virt_to_phys(vba);
pa &= ~4095ULL; /* clear out offset bits */
@@ -712,25 +713,41 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba)
pa |= 0x8000000000000000ULL; /* set "valid" bit */
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
if (ioc_needs_fdc) {
asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
}
}
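/* Worked example (editor's note, hypothetical values): for a kernel
 * buffer whose physical page is 0x12345000 and whose LCI result is
 * 0xab000, the entry layout documented above yields
 *
 *   vi    = (0xab000 >> 12) & 0xff  = 0xab      bits 12:19 of LCI
 *   entry = 0x8000000000000000ULL | 0x12345000 | vi
 *
 * which is then byte-swapped by cpu_to_le64() before the store.
 */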
/***********************************************************
* The Ike PCOM (Purge Command Register) is to purge
* stale entries in the IO TLB when unmapping entries.
/**
* sba_mark_invalid - invalidate one or more IO PDIR entries
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO Virtual Address mapped earlier
* @byte_cnt: number of bytes this mapping covers.
*
* Mark the IO PDIR entry(ies) as Invalid and invalidate the
* corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
* is used to purge stale entries in the IO TLB when unmapping entries.
*
* The PCOM register supports purging of multiple pages, with a minimum
* of 1 page and a maximum of 2GB. Hardware requires the address be
* aligned to the size of the range being purged. The size of the range
* must be a power of 2.
***********************************************************/
* must be a power of 2. The "Cool perf optimization" in the
* allocation routine helps keep that true.
*/
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
/* Even though this is a big-endian machine, the entries
** in the iopdir are swapped. That's why we clear the byte
** in the iopdir are little endian. That's why we clear the byte
** at +7 instead of at +0.
*/
int off = PDIR_INDEX(iovp)*sizeof(u64)+7;
@@ -775,32 +792,45 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
} while (byte_cnt > 0);
}
WRITE_REG32(iovp, ioc->ioc_hpa+IOC_PCOM);
WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
}
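/* Example (editor's note): purging a 16KB mapping through PCOM thus
 * needs an IO virtual address on a 16KB boundary; the power-of-2
 * allocation policy in sba_search_bitmap() is what keeps such
 * alignment true, per the "Cool perf optimization" remark above.
 */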
/**
* sba_dma_supported - PCI driver can query DMA support
* @dev: instance of PCI owned by the driver that's asking
* @mask: number of address bits this PCI device can handle
*
* See Documentation/DMA-mapping.txt
*/
static int
sba_dma_supported( struct pci_dev *dev, u64 mask)
{
if (dev == NULL) {
printk(MODULE_NAME ": EISA/ISA/et al not supported\n");
printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
BUG();
return(0);
}
dev->dma_mask = mask; /* save it */
/* only support PCI devices */
return((int) (mask >= 0xffffffff));
/* only support 32-bit PCI devices - no DAC support (yet) */
return((int) (mask == 0xffffffff));
}
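/* Usage sketch (editor's note, not part of this patch): PCI drivers
 * reach this routine through the generic DMA API described in
 * Documentation/DMA-mapping.txt, e.g.
 *
 *   if (!pci_dma_supported(pdev, 0xffffffff))
 *           printk(KERN_WARNING "mydrv: 32-bit DMA unavailable\n");
 *
 * where "mydrv" and "pdev" are hypothetical driver names.
 */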
/*
** map_single returns a fully formed IOVA
*/
/**
* sba_map_single - map one buffer and return IOVA for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @addr: driver buffer to map.
* @size: number of bytes to map in driver buffer.
* @direction: R/W or both.
*
* See Documentation/DMA-mapping.txt
*/
static dma_addr_t
sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
struct ioc *ioc = &sba_list->ioc[0]; /* FIXME : see Multi-IOC below */
struct ioc *ioc;
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
@@ -808,9 +838,14 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
int pide;
ASSERT(size > 0);
ASSERT(size <= DMA_CHUNK_SIZE);
ASSERT(dev->sysdata);
ioc = GET_IOC(dev);
ASSERT(ioc);
/* save offset bits */
offset = ((dma_addr_t) addr) & ~IOVP_MASK;
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
/* round up to nearest IOVP_SIZE */
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
@@ -827,7 +862,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
pide = sba_alloc_range(ioc, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
DBG_RUN("sba_map_single() 0x%p -> 0x%lx", addr, (long) iovp | offset);
DBG_RUN("%s() 0x%p -> 0x%lx",
__FUNCTION__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
@@ -860,72 +896,85 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
}
/**
* sba_unmap_single - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
* @direction: R/W or both.
*
* See Documentation/DMA-mapping.txt
*/
static void
sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
{
#ifdef FIXME
/* Multi-IOC (ie N-class) : need to lookup IOC from dev
** o If we can't know about lba PCI data structs, that eliminates ->sysdata.
** o walking up pcidev->parent dead ends at elroy too
** o leaves hashing dev->bus->number into some lookup.
** (may only work for N-class)
** o use (struct pci_hba) and put fields in there for DMA.
** (ioc and per device dma_hint.)
**
** Last one seems the clearest and most promising.
** sba_dma_supported() fill in those fields when the driver queries
** the system for support.
*/
struct ioc *ioc = (struct ioc *) ((struct pci_hba *) (dev->sysdata))->dma_data;
#else
struct ioc *ioc = &sba_list->ioc[0];
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
struct sba_dma_pair *d;
#endif
unsigned long flags;
dma_addr_t offset;
ASSERT(dev->sysdata);
ioc = GET_IOC(dev);
ASSERT(ioc);
offset = iova & ~IOVP_MASK;
DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);
DBG_RUN("%s() iovp 0x%lx/%x\n",
__FUNCTION__, (long) iova, size);
iova ^= offset; /* clear offset bits */
size += offset;
size = ROUNDUP(size, IOVP_SIZE);
ASSERT(0 != iova);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
ioc->usingle_calls++;
ioc->usingle_pages += size >> IOVP_SHIFT;
#endif
#ifdef DELAYED_RESOURCE_CNT
if (ioc->saved_cnt < DELAYED_RESOURCE_CNT) {
ioc->saved_iova[ioc->saved_cnt] = iova;
ioc->saved_size[ioc->saved_cnt] = size;
ioc_saved_cnt++;
} else {
do {
#endif
sba_mark_invalid(ioc, iova, size);
sba_free_range(ioc, iova, size);
#ifdef DELAYED_RESOURCE_CNT
ioc->saved_cnt--;
iova = ioc->saved_iova[ioc->saved_cnt];
size = ioc->saved_size[ioc->saved_cnt];
} while (ioc->saved_cnt)
/* flush purges */
(void) (volatile) READ_REG32(ioc->ioc_hpa+IOC_PCOM);
#if DELAYED_RESOURCE_CNT > 0
d = &(ioc->saved[ioc->saved_cnt]);
d->iova = iova;
d->size = size;
if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
int cnt = ioc->saved_cnt;
while (cnt--) {
sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
d--;
}
#else
/* flush purges */
READ_REG32(ioc->ioc_hpa+IOC_PCOM);
#endif
ioc->saved_cnt = 0;
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
}
#else /* DELAYED_RESOURCE_CNT == 0 */
sba_mark_invalid(ioc, iova, size);
sba_free_range(ioc, iova, size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
spin_unlock_irqrestore(&ioc->res_lock, flags);
/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
** For Astro based systems this isn't a big deal WRT performance.
** As long as 2.4 kernels copyin/copyout data from/to userspace,
** we don't need the syncdma. The issue here is I/O MMU cachelines
** are *not* coherent in all cases. May be hwrev dependent.
** Need to investigate more.
asm volatile("syncdma");
*/
}
/**
* sba_alloc_consistent - allocate/map shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
* See Documentation/DMA-mapping.txt
*/
static void *
sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
@@ -948,6 +997,15 @@ sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
}
/**
* sba_free_consistent - free/unmap shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
* See Documentation/DMA-mapping.txt
*/
static void
sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
@@ -955,27 +1013,6 @@ sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t
free_pages((unsigned long) vaddr, get_order(size));
}
/*
** Two address ranges are "virtually contiguous" iff:
** 1) end of prev == start of next, or... append case
** 3) end of next == start of prev prepend case
**
** and they are DMA contiguous *iff*:
** 2) end of prev and start of next are both on a page boundry
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
/*
** Assumption is two transactions are mutually exclusive.
** ie both go to different parts of memory.
** If both are true, then both transaction are on the same page.
*/
#define DMA_SAME_PAGE(s1,e1,s2,e2) \
( ((((s1) ^ (s2)) >> PAGE_SHIFT) == 0) \
&& ((((e1) ^ (e2)) >> PAGE_SHIFT) == 0) )
/*
** Since 0 is a valid pdir_base index value, can't use that
@@ -988,6 +1025,17 @@ sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t
int dump_run_sg = 0;
#endif
/**
* sba_fill_pdir - write allocated SG entries into IO PDIR
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
*
* Take preprocessed SG list and write corresponding entries
* in the IO PDIR.
*/
static SBA_INLINE int
sba_fill_pdir(
struct ioc *ioc,
@@ -1006,16 +1054,16 @@ sba_fill_pdir(
#ifdef DEBUG_LARGE_SG_ENTRIES
if (dump_run_sg)
printk(" %d : %08lx/%05x %p/%05x\n",
printk(KERN_DEBUG " %2d : %08lx/%05x %p/%05x\n",
nents,
(unsigned long) sg_dma_address(startsg), cnt,
startsg->address, startsg->length
sg_virt_addr(startsg), startsg->length
);
#else
DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n",
nents,
(unsigned long) sg_dma_address(startsg), cnt,
startsg->address, startsg->length
sg_virt_addr(startsg), startsg->length
);
#endif
/*
@@ -1024,11 +1072,10 @@ sba_fill_pdir(
if (sg_dma_address(startsg) & PIDE_FLAG) {
u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
dma_offset = (unsigned long) pide & ~IOVP_MASK;
pide >>= IOVP_SHIFT;
pdirp = &(ioc->pdir_base[pide]);
sg_dma_address(startsg) = 0;
++dma_sg;
sg_dma_address(dma_sg) = (pide << IOVP_SHIFT) + dma_offset;
dma_sg++;
sg_dma_address(dma_sg) = pide;
pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
n_mappings++;
}
@@ -1036,9 +1083,12 @@ sba_fill_pdir(
** Look for a VCONTIG chunk
*/
if (cnt) {
unsigned long vaddr = (unsigned long) startsg->address;
unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);
ASSERT(pdirp);
/* Since multiple Vcontig blocks could make up
** one DMA stream, *add* cnt to dma_len.
*/
sg_dma_len(dma_sg) += cnt;
cnt += dma_offset;
dma_offset=0; /* only want offset on first chunk */
@@ -1062,133 +1112,142 @@ sba_fill_pdir(
}
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on a page boundry.
**
** Doing the fill seperate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
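/* Worked example (editor's note): on a 64-bit kernel with
 * PAGE_SHIFT == 12, the shift throws away everything but the low
 * 12 bits of (__X | __Y):
 *
 *   DMA_CONTIG(0x3000, 0x4000) -> (0x7000 << 52) == 0   -> contiguous
 *   DMA_CONTIG(0x3000, 0x4080) -> bit 7 of 0x7080 stays -> not
 *
 * i.e. both "end of prev" and "start of next" must be page aligned.
 */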
/**
* sba_coalesce_chunks - preprocess the SG list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
*
* First pass is to walk the SG list and determine where the breaks are
* in the DMA stream. Allocates PDIR entries but does not fill them.
* Returns the number of DMA chunks.
*
* Doing the fill separate from the coalescing/allocation keeps the
* code simpler. Future enhancement could make one pass through
* the sglist do both.
*/
static SBA_INLINE int
sba_coalesce_chunks( struct ioc *ioc,
struct scatterlist *startsg,
int nents)
{
struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
unsigned long vcontig_len; /* len of VCONTIG chunk */
unsigned long vcontig_end;
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
while (nents > 0) {
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
struct scatterlist *chunksg; /* virtually contig chunk head */
unsigned long chunk_addr, chunk_len; /* start/len of VCONTIG chunk */
unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);
/*
** Prepare for first/next DMA stream
*/
dma_sg = chunksg = startsg;
dma_len = chunk_len = startsg->length;
chunk_addr = (unsigned long) startsg->address;
dma_offset = 0UL;
dma_sg = vcontig_sg = startsg;
dma_len = vcontig_len = vcontig_end = startsg->length;
vcontig_end += vaddr;
dma_offset = vaddr & ~IOVP_MASK;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
sg_dma_len(startsg) = 0;
/*
** This loop terminates one iteration "early" since
** it's always looking one "ahead".
*/
while (--nents > 0) {
/* ptr to coalesce prev and next */
struct scatterlist *prev_sg = startsg;
unsigned long prev_end = (unsigned long) prev_sg->address + prev_sg->length;
unsigned long current_end;
unsigned long vaddr; /* tmp */
startsg++;
/* PARANOID: clear entries */
sg_dma_address(startsg) = 0;
sg_dma_len(startsg) = 0;
/* Now start looking ahead */
startsg++;
current_end = (unsigned long) startsg->address + startsg->length;
/* catch brokenness in SCSI layer */
ASSERT(startsg->length <= DMA_CHUNK_SIZE);
/*
** First look for virtually contiguous blocks.
** PARISC needs this since it's cache is virtually
** indexed and we need the associated virtual
** address for each I/O address we map.
**
** 1) can we *prepend* the next transaction?
** First make sure current dma stream won't
** exceed DMA_CHUNK_SIZE if we coalesce the
** next entry.
*/
if (current_end == (unsigned long) prev_sg->address)
{
/* prepend : get new offset */
chunksg = startsg;
chunk_addr = (unsigned long) prev_sg->address;
chunk_len += startsg->length;
dma_len += startsg->length;
continue;
}
if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) > DMA_CHUNK_SIZE)
break;
/*
** 2) or append the next transaction?
** Then look for virtually contiguous blocks.
** PARISC needs to associate a virtual address
** with each IO address mapped. The CPU cache is
** virtually tagged and the IOMMU uses part
** of the virtual address to participate in
** CPU cache coherency.
**
** append the next transaction?
*/
if (prev_end == (unsigned long) startsg->address)
vaddr = (unsigned long) sg_virt_addr(startsg);
if (vcontig_end == vaddr)
{
chunk_len += startsg->length;
vcontig_len += startsg->length;
vcontig_end += startsg->length;
dma_len += startsg->length;
continue;
}
#ifdef DEBUG_LARGE_SG_ENTRIES
dump_run_sg = (chunk_len > IOVP_SIZE);
dump_run_sg = (vcontig_len > IOVP_SIZE);
#endif
/*
** Not virtually contiguous.
** Terminate prev chunk.
** Start a new chunk.
**
** Once we start a new VCONTIG chunk, the offset
** Once we start a new VCONTIG chunk, dma_offset
** can't change. And we need the offset from the first
** chunk - not the last one. Ergo, successive chunks
** must start on page boundaries and dovetail
** with its predecessor.
*/
sg_dma_len(prev_sg) = chunk_len;
sg_dma_len(vcontig_sg) = vcontig_len;
chunk_len = startsg->length;
dma_offset |= (chunk_addr & ~IOVP_MASK);
ASSERT((0 == (chunk_addr & ~IOVP_MASK)) ||
(dma_offset == (chunk_addr & ~IOVP_MASK)));
vcontig_sg = startsg;
vcontig_len = startsg->length;
#if 0
/*
** 4) do the chunks end/start on page boundaries?
** Easier than 3 since no offsets are involved.
** 3) do the entries end/start on page boundaries?
** Don't update vcontig_end until we've checked.
*/
if (DMA_CONTIG(prev_end, startsg->address))
if (DMA_CONTIG(vcontig_end, vaddr))
{
/*
** Yes.
** Reset chunk ptr.
*/
chunksg = startsg;
chunk_addr = (unsigned long) startsg->address;
vcontig_end = vcontig_len + vaddr;
dma_len += vcontig_len;
continue;
} else
#endif
{
} else {
break;
}
}
/*
** End of DMA Stream
** Terminate chunk.
** Terminate last VCONTIG block.
** Allocate space for DMA stream.
*/
sg_dma_len(startsg) = chunk_len;
sg_dma_len(vcontig_sg) = vcontig_len;
dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
sg_dma_address(dma_sg) =
PIDE_FLAG
| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
@@ -1200,24 +1259,34 @@ sba_coalesce_chunks( struct ioc *ioc,
}
/*
** And this algorithm still generally only ends up coalescing entries
** that happen to be on the same page due to how sglists are assembled.
*/
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @direction: R/W or both.
*
* See Documentation/DMA-mapping.txt
*/
static int
sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
struct ioc *ioc = &sba_list->ioc[0]; /* FIXME : see Multi-IOC below */
struct ioc *ioc;
int coalesced, filled = 0;
unsigned long flags;
DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
ASSERT(dev->sysdata);
ioc = GET_IOC(dev);
ASSERT(ioc);
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg_dma_address(sglist)= sba_map_single(dev, sglist->address,
sg_dma_address(sglist) = sba_map_single(dev,
(void *)sg_virt_addr(sglist),
sglist->length, direction);
sg_dma_len(sglist)= sglist->length;
sg_dma_len(sglist) = sglist->length;
return 1;
}
@@ -1272,16 +1341,29 @@ sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direc
}
/**
* sba_unmap_sg - unmap Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @direction: R/W or both.
*
* See Documentation/DMA-mapping.txt
*/
static void
sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
struct ioc *ioc = &sba_list->ioc[0]; /* FIXME : see Multi-IOC below */
struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
unsigned long flags;
#endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__FUNCTION__, nents, sglist->address, sglist->length);
__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
ASSERT(dev->sysdata);
ioc = GET_IOC(dev);
ASSERT(ioc);
#ifdef CONFIG_PROC_FS
ioc->usg_calls++;
@@ -1295,10 +1377,11 @@ sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int dir
while (sg_dma_len(sglist) && nents--) {
sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef CONFIG_PROC_FS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
++sglist;
}
@@ -1359,21 +1442,117 @@ PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
* Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
unsigned long pdir_base;
unsigned long pdir_order = get_order(pdir_size);
pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
if (NULL == (void *) pdir_base)
panic("sba_ioc_init() could not allocate I/O Page Table\n");
/* If this is not PA8700 (PCX-W2)
** OR newer than ver 2.2
** OR in a system that doesn't need VINDEX bits from SBA,
**
** then we aren't exposed to the HW bug.
*/
if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
|| (boot_cpu_data.pdc.versions > 0x202)
|| (boot_cpu_data.pdc.capabilities & 0x08L) )
return (void *) pdir_base;
/*
* PA8700 (PCX-W2, aka piranha) silent data corruption fix
*
* An interaction between PA8700 CPU (Ver 2.2 or older) and
* Ike/Astro can cause silent data corruption. This is only
* a problem if the I/O PDIR is located in memory such that
* (little-endian) bits 17 and 18 are on and bit 20 is off.
*
* Since the max IO Pdir size is 2MB, by cleverly allocating the
* right physical address, we can either avoid (IOPDIR <= 1MB)
* or minimize (2MB IO Pdir) the problem if we restrict the
* IO Pdir to a maximum size of 2MB-128K (1920K).
*
* Because we always allocate 2^N sized IO pdirs, either of the
* "bad" regions will be the last 128K if at all. That's easy
* to test for.
*
*/
if (pdir_order <= (19-12)) {
if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
/* allocate a new one on 512k alignment */
unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
/* release original */
free_pages(pdir_base, pdir_order);
pdir_base = new_pdir;
/* release excess */
while (pdir_order < (19-12)) {
new_pdir += pdir_size;
free_pages(new_pdir, pdir_order);
pdir_order +=1;
pdir_size <<=1;
}
}
} else {
/*
** 1MB or 2MB Pdir
** Needs to be aligned on an "odd" 1MB boundary.
*/
unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */
/* release original */
free_pages( pdir_base, pdir_order);
/* release first 1MB */
free_pages(new_pdir, 20-12);
pdir_base = new_pdir + 1024*1024;
if (pdir_order > (20-12)) {
/*
** 2MB Pdir.
**
** Flag tells init_bitmap() to mark bad 128k as used
** and to reduce the size by 128k.
*/
piranha_bad_128k = 1;
new_pdir += 3*1024*1024;
/* release last 1MB */
free_pages(new_pdir, 20-12);
/* release unusable 128KB */
free_pages(new_pdir - 128*1024 , 17-12);
pdir_size -= 128*1024;
}
}
memset((void *) pdir_base, 0, pdir_size);
return (void *) pdir_base;
}
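/* Worked example (editor's note) of the "bad region" test above: a
 * Pdir whose last byte lands at physical 0x0007ffff has bits 17 and
 * 18 set and bit 20 clear, so
 *
 *   (0x0007ffff & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL    bad
 *   (0x0017ffff & PIRANHA_ADDR_MASK) != PIRANHA_ADDR_VAL    safe
 *
 * and only in the first case is the allocation redone.
 */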
static void
sba_ioc_init(struct ioc *ioc)
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
extern unsigned long mem_max; /* arch.../setup.c */
extern void lba_init_iregs(void *, u32, u32); /* arch.../lba_pci.c */
/* lba_set_iregs() is in arch/parisc/kernel/lba_pci.c */
extern void lba_set_iregs(struct parisc_device *, u32, u32);
u32 iova_space_size, iova_space_mask;
void * pdir_base;
int pdir_size, iov_order;
unsigned long physmem;
struct parisc_device *lba;
/*
** Determine IOVA Space size from memory size.
** Using "mem_max" is a kluge.
**
** Ideally, PCI drivers would register the maximum number
** of DMA they can have outstanding for each device they
@@ -1385,20 +1564,24 @@ sba_ioc_init(struct ioc *ioc)
** While we have 32-bit "IOVA" space, the top 2 bits are used
** for DMA hints - ergo only 30 bits max.
*/
physmem = num_physpages << PAGE_SHIFT;
iova_space_size = (u32) (physmem/(sba_mem_ratio*global_ioc_cnt));
/* limit IOVA space size to 1MB-1GB */
if (mem_max < (sba_mem_ratio*1024*1024)) {
if (iova_space_size < 1024*1024) {
iova_space_size = 1024*1024;
}
#ifdef __LP64__
} else if (mem_max > (sba_mem_ratio*512*1024*1024)) {
else if (iova_space_size > 512*1024*1024) {
iova_space_size = 512*1024*1024;
#endif
} else {
iova_space_size = (u32) (mem_max/sba_mem_ratio);
}
#endif
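/* Worked example (editor's note, hypothetical numbers): with 1GB of
 * physical memory, sba_mem_ratio == 8 and a single IOC,
 * iova_space_size = 1GB / (8 * 1) = 128MB, already inside the
 * 1MB..512MB clamp applied above.
 */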
/*
** iova space must be log2() in size.
** thus, pdir/res_map will also be log2().
** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
*/
iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT));
ASSERT(iov_order <= (30 - IOVP_SHIFT)); /* iova_space_size <= 1GB */
@@ -1407,35 +1590,27 @@ sba_ioc_init(struct ioc *ioc)
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
ASSERT(pdir_size < 4*1024*1024); /* max pdir size < 4MB */
ASSERT(pdir_size < 4*1024*1024); /* max pdir size == 2MB */
/* Verify it's a power of two */
ASSERT((1 << get_order(pdir_size)) == (pdir_size >> PAGE_SHIFT));
DBG_INIT("%s() hpa 0x%p mem %dMBIOV %dMB (%d bits) PDIR size 0x%0x",
__FUNCTION__, ioc->ioc_hpa, (int) (mem_max>>20),
DBG_INIT("%s() hpa 0x%lx mem %dMB IOV %dMB (%d bits) PDIR size 0x%0x\n",
__FUNCTION__, ioc->ioc_hpa, (int) (physmem>>20),
iova_space_size>>20, iov_order + PAGE_SHIFT, pdir_size);
/* FIXME : DMA HINTs not used */
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
ioc->pdir_base =
pdir_base = (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));
if (NULL == pdir_base)
{
panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
}
memset(pdir_base, 0, pdir_size);
ioc->pdir_base = sba_alloc_pdir(pdir_size);
DBG_INIT("sba_ioc_init() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
pdir_base, pdir_size,
DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
__FUNCTION__, ioc->pdir_base, pdir_size,
ioc->hint_shift_pdir, ioc->hint_mask_pdir);
ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base);
WRITE_REG64(virt_to_phys(pdir_base), (u64 *)(ioc->ioc_hpa+IOC_PDIR_BASE));
DBG_INIT(" base %p\n", pdir_base);
ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
/* build IMASK for IOC and Elroy */
iova_space_mask = 0xffffffff;
@@ -1448,8 +1623,8 @@ sba_ioc_init(struct ioc *ioc)
ioc->ibase = IOC_IOVA_SPACE_BASE | 1; /* bit 0 == enable bit */
ioc->imask = iova_space_mask; /* save it */
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", __FUNCTION__,
ioc->ibase, ioc->imask);
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
__FUNCTION__, ioc->ibase, ioc->imask);
/*
** FIXME: Hint registers are programmed with default hint
@@ -1460,22 +1635,26 @@ sba_ioc_init(struct ioc *ioc)
/*
** setup Elroy IBASE/IMASK registers as well.
*/
lba_init_iregs(ioc->ioc_hpa, ioc->ibase, ioc->imask);
for (lba = sba->child; lba; lba = lba->sibling) {
int rope_num = (lba->hpa >> 13) & 0xf;
if (rope_num >> 3 == ioc_num)
lba_set_iregs(lba, ioc->ibase, ioc->imask);
}
/*
** Program the IOC's ibase and enable IOVA translation
*/
WRITE_REG32(ioc->ibase, ioc->ioc_hpa+IOC_IBASE);
WRITE_REG32(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
WRITE_REG(ioc->ibase, ioc->ioc_hpa+IOC_IBASE);
WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
/* Set I/O PDIR Page size to 4K */
WRITE_REG32(0, ioc->ioc_hpa+IOC_TCNFG);
WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
/*
** Clear I/O TLB of any possible entries.
** (Yes. This is a it paranoid...but so what)
** (Yes. This is a bit paranoid...but so what)
*/
WRITE_REG32(0 | 31, ioc->ioc_hpa+IOC_PCOM);
WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
DBG_INIT("%s() DONE\n", __FUNCTION__);
}
@@ -1498,23 +1677,24 @@ sba_hw_init(struct sba_device *sba_dev)
{
int i;
int num_ioc;
u32 ioc_ctl;
u64 ioc_ctl;
ioc_ctl = READ_REG32(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT("%s() hpa 0x%p ioc_ctl 0x%x ->", __FUNCTION__, sba_dev->sba_hpa, ioc_ctl );
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC);
ASSERT(ioc_ctl & IOC_CTRL_TE); /* astro: firmware enables this */
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_TC; /* Astro: firmware enables this */
WRITE_REG32(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
#ifdef SBA_DEBUG_INIT
ioc_ctl = READ_REG32(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT(" 0x%x\n", ioc_ctl );
#ifdef DEBUG_SBA_INIT
ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
if (IS_ASTRO(sba_dev->iodc)) {
/* PAT_PDC (L-class) also reports the same goofy base */
sba_dev->ioc[0].ioc_hpa = (char *) ASTRO_IOC_OFFSET;
sba_dev->ioc[0].ioc_hpa = ASTRO_IOC_OFFSET;
num_ioc = 1;
} else {
sba_dev->ioc[0].ioc_hpa = sba_dev->ioc[1].ioc_hpa = 0;
@@ -1522,26 +1702,25 @@ sba_hw_init(struct sba_device *sba_dev)
}
sba_dev->num_ioc = num_ioc;
for( i = 0; i < num_ioc; i++)
{
(unsigned long) sba_dev->ioc[i].ioc_hpa += (unsigned long) sba_dev->sba_hpa + IKE_IOC_OFFSET(i);
for (i = 0; i < num_ioc; i++) {
sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa + IKE_IOC_OFFSET(i);
/*
** Make sure the box crashes if we get any errors on a rope.
*/
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
/* flush out the writes */
READ_REG32(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
sba_ioc_init(&(sba_dev->ioc[i]));
sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
}
}
@@ -1555,11 +1734,10 @@ sba_common_init(struct sba_device *sba_dev)
*/
sba_dev->next = sba_list;
sba_list = sba_dev;
sba_count++;
for(i=0; i< sba_dev->num_ioc; i++) {
int res_size;
#ifdef CONFIG_DMB_TRAP
#ifdef DEBUG_DMB_TRAP
extern void iterate_pages(unsigned long , unsigned long ,
void (*)(pte_t * , unsigned long),
unsigned long );
@@ -1567,13 +1745,20 @@ sba_common_init(struct sba_device *sba_dev)
#endif
/* resource map size dictated by pdir_size */
res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
/* Second part of PIRANHA BUG */
if (piranha_bad_128k) {
res_size -= (128*1024)/sizeof(u64);
}
res_size >>= 3; /* convert bit count to byte count */
DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, res_size);
DBG_INIT("%s() res_size 0x%x\n",
__FUNCTION__, res_size);
sba_dev->ioc[i].res_size = res_size;
sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
#ifdef CONFIG_DMB_TRAP
#ifdef DEBUG_DMB_TRAP
iterate_pages( sba_dev->ioc[i].res_map, res_size,
set_data_memory_break, 0);
#endif
@@ -1594,39 +1779,66 @@ sba_common_init(struct sba_device *sba_dev)
sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif
#ifdef CONFIG_DMB_TRAP
/* Third (and last) part of PIRANHA BUG */
if (piranha_bad_128k) {
/* region from +1408K to +1536K is unusable. */
int idx_start = (1408*1024/sizeof(u64)) >> 3;
int idx_end = (1536*1024/sizeof(u64)) >> 3;
long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
/* mark that part of the io pdir busy */
while (p_start < p_end)
*p_start++ = -1;
}
#ifdef DEBUG_DMB_TRAP
iterate_pages( sba_dev->ioc[i].res_map, res_size,
set_data_memory_break, 0);
iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
set_data_memory_break, 0);
#endif
DBG_INIT("sba_common_init() %d res_map %x %p\n",
i, res_size, sba_dev->ioc[i].res_map);
DBG_INIT("%s() %d res_map %x %p\n",
__FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
}
sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
#ifdef DEBUG_SBA_INIT
/*
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
} else {
printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
}
#endif
}
#ifdef CONFIG_PROC_FS
static int sba_proc_info(char *buf, char **start, off_t offset, int len)
{
struct sba_device *sba_dev = sba_list;
/* FIXME: Multi-IOC support broken! */
struct ioc *ioc = &sba_dev->ioc[0];
struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
unsigned long i = 0, avg = 0, min, max;
sprintf(buf, "%s rev %d.%d\n",
parisc_getHWdescription(sba_dev->iodc->hw_type,
sba_dev->iodc->hversion, sba_dev->iodc->sversion),
sba_dev->name,
(sba_dev->hw_rev & 0x7) + 1,
(sba_dev->hw_rev & 0x18) >> 3
);
sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n",
buf,
((ioc->res_size << 3) * sizeof(u64)), /* 8 bits per byte */
total_pages); /* 8 bits per byte */
(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
total_pages);
sprintf(buf, "%sIO PDIR entries : %ld free %ld used (%d%%)\n", buf,
total_pages - ioc->used_pages, ioc->used_pages,
@@ -1645,46 +1857,72 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len)
sprintf(buf, "%s Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
buf, min, avg, max);
sprintf(buf, "%spci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
sprintf(buf, "%spci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
buf, ioc->msingle_calls, ioc->msingle_pages,
(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
/* KLUGE - unmap_sg calls unmap_single for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
min = ioc->usingle_calls;
max = ioc->usingle_pages - ioc->usg_pages;
sprintf(buf, "%spci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
sprintf(buf, "%spci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
buf, min, max,
(int) ((max * 1000)/min));
sprintf(buf, "%spci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
sprintf(buf, "%spci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
buf, ioc->msg_calls, ioc->msg_pages,
(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
sprintf(buf, "%spci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
sprintf(buf, "%spci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
buf, ioc->usg_calls, ioc->usg_pages,
(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
return strlen(buf);
}
#if 0
/* XXX too much output - exceeds 4k limit and needs to be re-written */
static int
sba_resource_map(char *buf, char **start, off_t offset, int len)
{
struct sba_device *sba_dev = sba_list;
struct ioc *ioc = &sba_dev->ioc[0];
unsigned long *res_ptr = (unsigned long *)ioc->res_map;
struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
unsigned int *res_ptr = (unsigned int *)ioc->res_map;
int i;
for(i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) {
buf[0] = '\0';
for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
if ((i & 7) == 0)
strcat(buf,"\n ");
sprintf(buf, "%s %08lx", buf, *res_ptr);
sprintf(buf, "%s %08x", buf, *res_ptr);
}
strcat(buf, "\n");
return strlen(buf);
}
#endif
#endif /* 0 */
#endif /* CONFIG_PROC_FS */
static struct parisc_device_id sba_tbl[] = {
{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
/* These two entries are commented out because we don't find them in a
 * bus walk yet. If/when we do, leaving them in would make us think we
 * have many more SBAs than we really do.
* { HPHW_BCPORT, HVERSION_REV_ANY_ID, ASTRO_ROPES_PORT, 0xc },
* { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_ROPES_PORT, 0xc },
*/
{ 0, }
};
int sba_driver_callback(struct parisc_device *);
static struct parisc_driver sba_driver = {
name: MODULE_NAME,
id_table: sba_tbl,
probe: sba_driver_callback,
};
/*
** Determine if sba should claim this chip (return 0) or not (return 1).
@@ -1692,47 +1930,75 @@ sba_resource_map(char *buf, char **start, off_t offset, int len)
** have work to do.
*/
int
sba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
sba_driver_callback(struct parisc_device *dev)
{
struct sba_device *sba_dev;
u32 func_class;
int i;
char *version;
if (IS_ASTRO(d)) {
static char astro_rev[]="Astro ?.?";
#ifdef DEBUG_SBA_INIT
sba_dump_ranges(dev->hpa);
#endif
/* Read HW Rev First */
func_class = READ_REG32(d->hpa);
func_class = READ_REG(dev->hpa + SBA_FCLASS);
astro_rev[6] = '1' + (char) (func_class & 0x7);
astro_rev[8] = '0' + (char) ((func_class & 0x18) >> 3);
dri->version = astro_rev;
} else {
static char ike_rev[]="Ike rev ?";
if (IS_ASTRO(&dev->id)) {
unsigned long fclass;
static char astro_rev[]="Astro ?.?";
/* Read HW Rev First */
func_class = READ_REG32(d->hpa + SBA_FCLASS);
/* Astro is broken...Read HW Rev First */
fclass = READ_REG(dev->hpa);
astro_rev[6] = '1' + (char) (fclass & 0x7);
astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
version = astro_rev;
} else if (IS_IKE(&dev->id)) {
static char ike_rev[]="Ike rev ?";
ike_rev[8] = '0' + (char) (func_class & 0xff);
dri->version = ike_rev;
version = ike_rev;
} else {
static char reo_rev[]="REO rev ?";
reo_rev[8] = '0' + (char) (func_class & 0xff);
version = reo_rev;
}
if (!global_ioc_cnt) {
global_ioc_cnt = count_parisc_driver(&sba_driver);
/* Astro has one IOC per SBA; Ike and REO have two */
if (!IS_ASTRO(&dev->id))
global_ioc_cnt *= 2;
}
printk("%s found %s at 0x%p\n", dri->name, dri->version, d->hpa);
printk(KERN_INFO "%s found %s at 0x%lx\n",
MODULE_NAME, version, dev->hpa);
#ifdef DEBUG_SBA_INIT
sba_dump_tlb(dev->hpa);
#endif
sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
if (NULL == sba_dev)
{
printk(MODULE_NAME " - couldn't alloc sba_device\n");
if (NULL == sba_dev) {
printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
return(1);
}
dev->sysdata = (void *) sba_dev;
memset(sba_dev, 0, sizeof(struct sba_device));
for(i=0; i<MAX_IOC; i++)
spin_lock_init(&(sba_dev->ioc[i].res_lock));
sba_dev->dev = dev;
sba_dev->hw_rev = func_class;
sba_dev->iodc = d;
sba_dev->sba_hpa = d->hpa; /* faster access */
sba_dev->iodc = &dev->id;
sba_dev->name = dev->name;
sba_dev->sba_hpa = dev->hpa; /* faster access */
sba_get_pat_resources(sba_dev);
sba_hw_init(sba_dev);
@@ -1741,12 +2007,46 @@ sba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
hppa_dma_ops = &sba_ops;
#ifdef CONFIG_PROC_FS
if (IS_ASTRO(d)) {
if (IS_ASTRO(&dev->id)) {
create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info);
} else {
} else if (IS_IKE(&dev->id)) {
create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info);
} else {
create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info);
}
#if 0
create_proc_info_entry("bitmap", 0, proc_runway_root, sba_resource_map);
#endif
#endif
return 0;
}
/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
register_parisc_driver(&sba_driver);
}
/**
* sba_get_iommu - Assign the iommu pointer for the pci bus controller.
* @pci_hba: The parisc device.
*
* This function searches through the registered IOMMUs and returns the
* appropriate IOMMU data for the given parisc PCI controller.
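*
* Hypothetical call site in an LBA/PCI HBA driver (names assumed):
*	lba->iommu = sba_get_iommu(dev);
* where dev->parent is the SBA that owns the rope.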
*/
void * sba_get_iommu(struct parisc_device *pci_hba)
{
struct sba_device *sba = (struct sba_device *) pci_hba->parent->sysdata;
char t = pci_hba->parent->id.hw_type;
int iocnum = (pci_hba->hw_path >> 3); /* rope # */
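/* With up to 16 ropes split between two IOCs (Astro aside), shifting
 * the rope number right by 3 picks the IOC that serves this rope.
 */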
if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
BUG();
return &(sba->ioc[iocnum]);
}
/* National Semiconductor NS87560UBD Super I/O controller used in
* HP [BCJ]x000 workstations.
*
* This chip is a horrid piece of engineering, and National
* denies any knowledge of its existence. Thus no datasheet is
* available off www.national.com.
*
* (C) Copyright 2000 Linuxcare, Inc.
* (C) Copyright 2000 Linuxcare Canada, Inc.
* (C) Copyright 2000 Martin K. Petersen <mkp@linuxcare.com>
* (C) Copyright 2000 Alex deVries <alex@linuxcare.com>
* (C) Copyright 2001 John Marvin <jsm@fc.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* The initial version of this is by Martin K. Petersen. Alex deVries
* has spent a bit of time trying to coax it into working.
*
* Major changes to get basic interrupt infrastructure working to
* hopefully be able to support all SuperIO devices. Currently
* works with serial. -- John Marvin <jsm@fc.hp.com>
*/
/* NOTES:
*
* Function 0 is an IDE controller. It is identical to a PC87415 IDE
* controller (and identifies itself as such).
*
* Function 1 is a "Legacy I/O" controller. Under this function is a
* whole mess of legacy I/O peripherals. Of course, HP hasn't enabled
* all the functionality in hardware, but the following is available:
*
* Two 16550A compatible serial controllers
* An IEEE 1284 compatible parallel port
* A floppy disk controller
*
* Function 2 is a USB controller.
*
* We must be incredibly careful during initialization. Since all
* interrupts are routed through function 1 (which is not allowed by
* the PCI spec), we need to program the PICs on the legacy I/O port
* *before* we attempt to set up IDE and USB. @#$!&
*
* According to HP, devices are only enabled by firmware if they have
* a physical device connected.
*
* Configuration register bits:
* 0x5A: FDC, SP1, IDE1, SP2, IDE2, PAR, Reserved, P92
* 0x5B: RTC, 8259, 8254, DMA1, DMA2, KBC, P61, APM
*
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/serial.h>
#include <linux/pci.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <linux/serial_reg.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/superio.h>
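/* Illustrative sketch only, not part of the original driver: shows how
 * the device-enable bits documented in the NOTES above could be decoded.
 * Offsets 0x5A/0x5B and the bit order come from that comment; the
 * function name is hypothetical, and the block is compiled out.
 */
#if 0
static void superio_dump_enables(struct pci_dev *pdev)
{
static const char *bits_5a[8] = {
"FDC", "SP1", "IDE1", "SP2", "IDE2", "PAR", "Rsvd", "P92"
};
static const char *bits_5b[8] = {
"RTC", "8259", "8254", "DMA1", "DMA2", "KBC", "P61", "APM"
};
u8 r5a, r5b;
int i;
/* Read both configuration registers, then report each enable bit. */
pci_read_config_byte(pdev, 0x5A, &r5a);
pci_read_config_byte(pdev, 0x5B, &r5b);
for (i = 0; i < 8; i++)
printk(KERN_DEBUG "SuperIO: %s=%d %s=%d\n",
bits_5a[i], (r5a >> i) & 1, bits_5b[i], (r5b >> i) & 1);
}
#endif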
static struct superio_device sio_dev = {
iosapic_irq: -1
};
#undef DEBUG_INIT
void
superio_inform_irq(int irq)
{
if (sio_dev.iosapic_irq != -1) {
printk(KERN_ERR "SuperIO: superio_inform_irq called twice! (more than one SuperIO?)\n");
BUG();
return;
}
sio_dev.iosapic_irq = irq;
}
static void
superio_interrupt(int irq, void *devp, struct pt_regs *regs)
{
struct superio_device *sio = (struct superio_device *)devp;
u8 results;
u8 local_irq;
/* Poll the 8259 to see if there's an interrupt. */
outb (OCW3_POLL,IC_PIC1+0);
results = inb(IC_PIC1+0);
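/* A polled 8259 answers with bit 7 set when a request is pending;
 * the low bits carry the highest-priority request level.
 */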
if ((results & 0x80) == 0) {
#ifndef CONFIG_SMP
/* HACK: need to investigate why this happens if SMP enabled */
BUG(); /* This shouldn't happen */
#endif
return;
}
/* Check to see which device is interrupting */
local_irq = results & 0x0f;
if (local_irq == 2 || local_irq > 7) {
printk(KERN_ERR "SuperIO: slave interrupted!\n");
BUG();
return;
}
if (local_irq == 7) {
/* Could be spurious. Check in service bits */
outb(OCW3_ISR,IC_PIC1+0);
results = inb(IC_PIC1+0);
if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */
printk(KERN_WARNING "SuperIO: spurious interrupt!\n");
return;
}
}
/* Call the appropriate device's interrupt */
do_irq(&sio->irq_region->action[local_irq],
sio->irq_region->data.irqbase + local_irq,
regs);
/* issue a specific EOI for this level */
outb((OCW2_SEOI|local_irq),IC_PIC1 + 0);
return;
}
/* Initialize Super I/O device */
static void __devinit
superio_init(struct superio_device *sio)
{
struct pci_dev *pdev = sio->lio_pdev;
u16 word;
u8 i;
if (!pdev || sio->iosapic_irq == -1) {
printk(KERN_ERR "All SuperIO functions not found!\n");
BUG();
return;
}
printk (KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i)\n",
pdev->slot_name,sio->iosapic_irq);
/* Find our I/O devices */
pci_read_config_word (pdev, SIO_SP1BAR, &sio->sp1_base);
sio->sp1_base &= ~1;
printk (KERN_INFO "SuperIO: Serial port 1 at 0x%x\n", sio->sp1_base);
pci_read_config_word (pdev, SIO_SP2BAR, &sio->sp2_base);
sio->sp2_base &= ~1;
printk (KERN_INFO "SuperIO: Serial port 2 at 0x%x\n", sio->sp2_base);
pci_read_config_word (pdev, SIO_PPBAR, &sio->pp_base);
sio->pp_base &= ~1;
printk (KERN_INFO "SuperIO: Parallel port at 0x%x\n", sio->pp_base);
pci_read_config_word (pdev, SIO_FDCBAR, &sio->fdc_base);
sio->fdc_base &= ~1;
printk (KERN_INFO "SuperIO: Floppy controller at 0x%x\n", sio->fdc_base);
pci_read_config_word (pdev, SIO_ACPIBAR, &sio->acpi_base);
sio->acpi_base &= ~1;
printk (KERN_INFO "SuperIO: ACPI at 0x%x\n", sio->acpi_base);
request_region (IC_PIC1, 0x1f, "pic1");
request_region (IC_PIC2, 0x1f, "pic2");
request_region (sio->acpi_base, 0x1f, "acpi");
/* Enable the legacy I/O function */
pci_read_config_word (pdev, PCI_COMMAND, &word);
word |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_IO;
pci_write_config_word (pdev, PCI_COMMAND, word);
pci_set_master (pdev);
/* Next project is programming the onboard interrupt
* controllers. PDC hasn't done this for us, since it's using
* polled I/O.
*/
/* Set PIC interrupts to edge triggered */
pci_write_config_byte (pdev, TRIGGER_1, 0x0);
pci_write_config_byte (pdev, TRIGGER_2, 0x0);
/* Disable all interrupt routing */
for (i = IR_LOW ; i < IR_HIGH ; i++)
pci_write_config_byte (pdev, i, 0x0);
/* PIC1 Initialization Command Word register programming */
outb (0x11,IC_PIC1+0); /* ICW1: ICW4 write req | ICW1 */
outb (0x00,IC_PIC1+1); /* ICW2: N/A */
outb (0x04,IC_PIC1+1); /* ICW3: Cascade */
outb (0x01,IC_PIC1+1); /* ICW4: x86 mode */
/* PIC1 Program Operational Control Words */
outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
outb (0xc2,IC_PIC1+0); /* OCW2: priority (3-7,0-2) */
/* PIC2 Initialization Command Word register programming */
outb (0x11,IC_PIC2+0); /* ICW1: ICW4 write req | ICW1 */
outb (0x00,IC_PIC2+1); /* ICW2: N/A */
outb (0x02,IC_PIC2+1); /* ICW3: Slave ID code */
outb (0x01,IC_PIC2+1); /* ICW4: x86 mode */
/* Program Operational Control Words */
outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
outb (0x68,IC_PIC1+0); /* OCW3: OCW3 select | ESMM | SMM */
/* Write master mask reg */
outb (0xff,IC_PIC1+1);
/* Set up interrupt routing */
pci_write_config_byte (pdev, IR_USB, 0x10); /* USB on IRQ1 */
pci_write_config_byte (pdev, IR_SER, 0x43); /* SP1 on IRQ3, SP2 on IRQ4 */
pci_write_config_byte (pdev, IR_PFD, 0x65); /* PAR on IRQ5, FDC on IRQ6 */
pci_write_config_byte (pdev, IR_IDE, 0x07); /* IDE1 on IRQ7 */
/* Set USB and IDE to level triggered interrupts, rest to edge */
pci_write_config_byte (pdev, TRIGGER_1, 0x82); /* IRQ 1 and 7 */
/* Setup USB power regulation */
outb(1, sio->acpi_base + USB_REG_CR);
if (inb(sio->acpi_base + USB_REG_CR) & 1)
printk(KERN_INFO "SuperIO: USB regulator enabled\n");
else
printk(KERN_ERR "USB regulator not initialized!\n");
pci_enable_device(pdev);
if (request_irq(sio->iosapic_irq,superio_interrupt,SA_INTERRUPT,
"SuperIO",(void *)sio)) {
printk(KERN_ERR "SuperIO: could not get irq\n");
BUG();
return;
}
sio->iosapic_irq_enabled = 1;
}
static void
superio_disable_irq(void *dev, int local_irq)
{
u8 r8;
if ((local_irq < 1) || (local_irq == 2) || (local_irq > 7)) {
printk(KERN_ERR "SuperIO: Illegal irq number.\n");
BUG();
return;
}
/* Mask interrupt */
r8 = inb(IC_PIC1+1);
r8 |= (1 << local_irq);
outb (r8,IC_PIC1+1);
}
static void
superio_enable_irq(void *dev, int local_irq)
{
struct superio_device *sio = (struct superio_device *)dev;
u8 r8;
if ((local_irq < 1) || (local_irq == 2) || (local_irq > 7)) {
printk(KERN_ERR "SuperIO: Illegal irq number.\n");
BUG();
return;
}
/*
* It's possible that we haven't initialized the legacy IO
* function yet. If not, do it now.
*/
if (!sio->iosapic_irq_enabled)
superio_init(sio);
/* Unmask interrupt */
r8 = inb(IC_PIC1+1);
r8 &= ~(1 << local_irq);
outb (r8,IC_PIC1+1);
}
static void
superio_mask_irq(void *dev, int local_irq)
{
BUG();
}
static void
superio_unmask_irq(void *dev, int local_irq)
{
BUG();
}
static struct irq_region_ops superio_irq_ops = {
disable_irq: superio_disable_irq,
enable_irq: superio_enable_irq,
mask_irq: superio_mask_irq,
unmask_irq: superio_unmask_irq
};
#ifdef DEBUG_INIT
static unsigned short expected_device[3] = {
PCI_DEVICE_ID_NS_87415,
PCI_DEVICE_ID_NS_87560_LIO,
PCI_DEVICE_ID_NS_87560_USB
};
#endif
int superio_fixup_irq(struct pci_dev *pcidev)
{
int local_irq;
#ifdef DEBUG_INIT
int fn;
fn = PCI_FUNC(pcidev->devfn);
/* Verify the function number matches the expected device id. */
if (expected_device[fn] != pcidev->device) {
BUG();
return -1;
}
printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %p\n",
pcidev->slot_name,
pcidev->vendor, pcidev->device,
__builtin_return_address(0));
#endif
if (!sio_dev.irq_region) {
/* Allocate an irq region for SuperIO devices */
sio_dev.irq_region = alloc_irq_region(SUPERIO_NIRQS,
&superio_irq_ops,
"SuperIO", (void *) &sio_dev);
if (!sio_dev.irq_region) {
printk(KERN_WARNING "SuperIO: alloc_irq_region failed\n");
return -1;
}
}
/*
* We don't allocate a SuperIO irq for the legacy IO function,
* since it is a "bridge". Instead, we will allocate irq's for
* each legacy device as they are initialized.
*/
switch(pcidev->device) {
case PCI_DEVICE_ID_NS_87415: /* Function 0 */
local_irq = IDE_IRQ;
break;
case PCI_DEVICE_ID_NS_87560_LIO: /* Function 1 */
sio_dev.lio_pdev = pcidev; /* save for later initialization */
return -1;
case PCI_DEVICE_ID_NS_87560_USB: /* Function 2 */
local_irq = USB_IRQ;
break;
default:
local_irq = -1;
BUG();
break;
}
return(sio_dev.irq_region->data.irqbase + local_irq);
}
void __devinit
superio_serial_init(void)
{
#ifdef CONFIG_SERIAL
struct serial_struct *serial;
int retval;
if (!sio_dev.irq_region)
return; /* superio not present */
if (!sio_dev.iosapic_irq_enabled)
superio_init(&sio_dev);
serial = kmalloc(2 * sizeof (struct serial_struct), GFP_KERNEL);
if (!serial) {
printk(KERN_WARNING "SuperIO: Could not get memory for serial struct.\n");
return;
}
memset(serial, 0, 2 * sizeof (struct serial_struct));
serial->type = PORT_16550A;
serial->line = 0;
serial->port = sio_dev.sp1_base;
serial->port_high = 0;
serial->irq = sio_dev.irq_region->data.irqbase + SP1_IRQ;
serial->io_type = SERIAL_IO_PORT;
serial->flags = 0;
serial->xmit_fifo_size = 16;
serial->custom_divisor = 0;
serial->baud_base = 115200;
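/* 115200 is the standard 16550 divisor base: 1.8432 MHz UART clock / 16 */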
retval = register_serial(serial);
if (retval < 0) {
printk(KERN_WARNING "SuperIO: Register Serial #0 failed.\n");
kfree (serial);
return;
}
serial++;
serial->type = PORT_16550A;
serial->line = 1;
serial->port = sio_dev.sp2_base;
serial->port_high = 0;
serial->irq = sio_dev.irq_region->data.irqbase + SP2_IRQ;
serial->io_type = SERIAL_IO_PORT;
serial->flags = 0;
serial->xmit_fifo_size = 16;
serial->custom_divisor = 0;
serial->baud_base = 115200;
retval = register_serial(serial);
if (retval < 0)
printk(KERN_WARNING "SuperIO: Register Serial #1 failed.\n");
#endif /* CONFIG_SERIAL */
}
EXPORT_SYMBOL(superio_serial_init);
#ifdef CONFIG_PARPORT_PC
void __devinit
superio_parport_init(void)
{
if (!sio_dev.irq_region)
return; /* superio not present */
if (!sio_dev.iosapic_irq_enabled)
superio_init(&sio_dev);
if (!parport_pc_probe_port(sio_dev.pp_base,
0 /*base_hi*/,
sio_dev.irq_region->data.irqbase + PAR_IRQ,
PARPORT_DMA_NONE /* dma */,
NULL /*struct pci_dev* */))
printk(KERN_WARNING "SuperIO: Probing parallel port failed.\n");
}
EXPORT_SYMBOL(superio_parport_init);
#endif /* CONFIG_PARPORT_PC */
int
superio_get_ide_irq(void)
{
if (sio_dev.irq_region)
return sio_dev.irq_region->data.irqbase + IDE_IRQ;
else
return 0;
}
EXPORT_SYMBOL(superio_get_ide_irq);
static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
#ifdef DEBUG_INIT
printk("superio_probe(%s) ven 0x%x dev 0x%x sv 0x%x sd 0x%x class 0x%x\n",
dev->slot_name,
dev->vendor, dev->device,
dev->subsystem_vendor, dev->subsystem_device,
dev->class);
/*
** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
** superio_probe(00:0e.1) ven 0x100b dev 0xe sv 0x0 sd 0x0 class 0x68000
** superio_probe(00:0e.2) ven 0x100b dev 0x12 sv 0x0 sd 0x0 class 0xc0310
*/
#endif
/* superio_fixup_irq(dev); */
if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) {
#ifdef CONFIG_PARPORT_PC
superio_parport_init();
#endif
#ifdef CONFIG_SERIAL
superio_serial_init();
#endif
/* REVISIT : superio_fdc_init() ? */
return 0;
} else {
/* don't claim this device; let whatever other driver
* owns it claim it
*/
return -1;
}
}
static struct pci_device_id superio_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_NS, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
static struct pci_driver superio_driver = {
name: "SuperIO",
id_table: superio_tbl,
probe: superio_probe,
};
static int __init superio_modinit(void)
{
return pci_module_init(&superio_driver);
}
static void __exit superio_exit(void)
{
pci_unregister_driver(&superio_driver);
}
module_init(superio_modinit);
module_exit(superio_exit);
/*
* WAX Device Driver
*
* (c) Copyright 2000 The Puffin Group Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* by Helge Deller <deller@gmx.de>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include "gsc.h"
#define WAX_GSC_IRQ 7 /* Hardcoded Interrupt for GSC */
#define WAX_GSC_NMI_IRQ 29
static int wax_choose_irq(struct parisc_device *dev)
{
int irq = -1;
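/* The child's sversion identifies the device type; anything we don't
 * recognize keeps -1 and is left unrouted.
 */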
switch (dev->id.sversion) {
case 0x73: irq = 30; break; /* HIL */
case 0x8c: irq = 25; break; /* RS232 */
case 0x90: irq = 21; break; /* WAX EISA BA */
}
return irq;
}
static void __init
wax_init_irq(struct busdevice *wax)
{
unsigned long base = wax->hpa;
/* Stop WAX barking for a bit */
gsc_writel(0x00000000, base+OFFSET_IMR);
/* clear pending interrupts */
(volatile u32) gsc_readl(base+OFFSET_IRR);
/* We're not really convinced we want to reset the onboard
* devices. Firmware does it for us...
*/
/* Resets */
// gsc_writel(0xFFFFFFFF, base+0x1000); /* HIL */
// gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */
/* Ok we hit it on the head with a hammer, our Dog is now
** comatose and muzzled. Devices will now unmask WAX
** interrupts as they are registered as irq's in the WAX range.
*/
}
int __init
wax_init_chip(struct parisc_device *dev)
{
struct busdevice *wax;
struct gsc_irq gsc_irq;
int irq, ret;
wax = kmalloc(sizeof(struct busdevice), GFP_KERNEL);
if (!wax)
return -ENOMEM;
wax->name = "Wax";
wax->hpa = dev->hpa;
wax->version = 0; /* gsc_readb(wax->hpa+WAX_VER); */
printk(KERN_INFO "%s at 0x%lx found.\n", wax->name, wax->hpa);
/* Stop wax hissing for a bit */
wax_init_irq(wax);
/* the IRQ wax should use */
irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
if (irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
__FUNCTION__);
kfree(wax);
return -EBUSY;
}
ret = request_irq(gsc_irq.irq, busdev_barked, 0, "wax", wax);
if (ret < 0) {
kfree(wax);
return ret;
}
/* Save this for debugging later */
wax->parent_irq = gsc_irq.irq;
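/* txn_addr is the CPU register the interrupt message is written to and
 * txn_data the value written there; ORed together they form the EIM
 * value loaded into WAX's IAR below.
 */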
wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
/* enable IRQs for devices below WAX */
gsc_writel(wax->eim, wax->hpa + OFFSET_IAR);
/* Done init'ing, register this driver */
ret = gsc_common_irqsetup(dev, wax);
if (ret) {
kfree(wax);
return ret;
}
fixup_child_irqs(dev, wax->busdev_region->data.irqbase,
wax_choose_irq);
/* On 715-class machines, Wax EISA is a sibling of Wax, not a child. */
if (dev->parent->id.hw_type != HPHW_IOA) {
fixup_child_irqs(dev->parent, wax->busdev_region->data.irqbase,
wax_choose_irq);
}
return ret;
}
static struct parisc_device_id wax_tbl[] = {
{ HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008e },
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, wax_tbl);
struct parisc_driver wax_driver = {
name: "Wax",
id_table: wax_tbl,
probe: wax_init_chip,
};