Commit 6254a8ba authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-base-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents a2769608 feb7ed4f
@@ -170,7 +170,7 @@ config VIRTUAL_MEM_MAP
 
 config DISCONTIGMEM
 	bool "Discontiguous memory support"
-	depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC) && NUMA && VIRTUAL_MEM_MAP
+	depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1) && NUMA && VIRTUAL_MEM_MAP
 	default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
 	help
 	  Say Y to support efficient handling of discontiguous physical memory,
@@ -179,10 +179,10 @@ config DISCONTIGMEM
 	  See <file:Documentation/vm/numa> for more.
 
 config IA64_CYCLONE
-	bool "Support Cyclone(EXA) Time Source"
+	bool "Cyclone (EXA) Time Source support"
 	help
 	  Say Y here to enable support for IBM EXA Cyclone time source.
 	  If you're unsure, answer N.
 
 config IOSAPIC
 	bool
...
This diff is collapsed.
@@ -469,6 +469,14 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	/*
+	 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
+	 * if a TLB entry is purged while in use. sba_mark_invalid()
+	 * purges IOTLB entries in power-of-two sizes, so we also
+	 * allocate IOVA space in power-of-two sizes.
+	 */
+	bits_wanted = 1UL << get_iovp_order(bits_wanted << PAGE_SHIFT);
+
 	if (likely(bits_wanted == 1)) {
 		unsigned int bitshiftcnt;
 		for(; res_ptr < res_end ; res_ptr++) {
@@ -675,6 +683,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	int bits_not_wanted = size >> iovp_shift;
 	unsigned long m;
 
+	/* Round up to power-of-two size: see AR2305 note above */
+	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << PAGE_SHIFT);
 	for (; bits_not_wanted > 0 ; res_ptr++) {
 		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
...
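The sba_iommu change above rounds every IOVA request up to a power-of-two number of pages via get_iovp_order(), so allocations always line up with the power-of-two purge granularity of sba_mark_invalid() (the AR2305 workaround). Below is a minimal user-space sketch of that rounding arithmetic, assuming 4 KiB IOVA pages; order_for() is a hypothetical stand-in for the kernel's get_iovp_order().

#include <stdio.h>

/* Smallest order such that (1 << order) pages cover `size` bytes. */
static unsigned int order_for(unsigned long size, unsigned int page_shift)
{
	unsigned long pages = (size + (1UL << page_shift) - 1) >> page_shift;
	unsigned int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned int page_shift = 12;	/* assumed 4 KiB IOVA pages */
	unsigned long bits_wanted = 5;	/* caller asks for 5 pages  */
	unsigned long rounded;

	/* Mirrors: bits_wanted = 1UL << get_iovp_order(bits_wanted << PAGE_SHIFT); */
	rounded = 1UL << order_for(bits_wanted << page_shift, page_shift);
	printf("%lu pages requested -> %lu pages reserved\n", bits_wanted, rounded);
	return 0;
}

A 5-page request therefore reserves 8 pages; sba_free_range() applies the same rounding so the resource bitmap stays consistent.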
@@ -293,20 +293,20 @@ acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-/* Hook from generic ACPI tables.c */
-void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static void __init
+acpi_madt_oem_check (char *oem_id, char *oem_table_id)
 {
 	if (!strncmp(oem_id, "IBM", 3) &&
-	    (!strncmp(oem_table_id, "SERMOW", 6))){
-		/* Unfortunatly ITC_DRIFT is not yet part of the
+	    (!strncmp(oem_table_id, "SERMOW", 6))) {
+		/*
+		 * Unfortunately ITC_DRIFT is not yet part of the
 		 * official SAL spec, so the ITC_DRIFT bit is not
 		 * set by the BIOS on this hardware.
 		 */
 		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
-		/*Start cyclone clock*/
-		cyclone_setup(0);
+		cyclone_setup();
 	}
 }
...
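The rewritten acpi_madt_oem_check() keys the quirk off the OEM ID and OEM table ID strings reported in the ACPI MADT: on IBM "SERMOW" systems it sets the ITC_DRIFT platform feature and switches the time source to the Cyclone counter. A hedged stand-alone sketch of the same string test, with placeholder OEM values in place of what firmware would actually report:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Placeholder OEM fields; the real values come from the MADT header. */
	const char *oem_id = "IBM";
	const char *oem_table_id = "SERMOW";

	/* Same comparison as acpi_madt_oem_check() in the patch; strncmp() is
	 * used because the ACPI fields are fixed-width, not NUL-terminated. */
	if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "SERMOW", 6))
		printf("IBM EXA platform: set ITC_DRIFT, use Cyclone time source\n");
	else
		printf("no OEM quirk applied\n");
	return 0;
}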
@@ -10,10 +10,9 @@
 #define CYCLONE_TIMER_FREQ 100000000
 
 int use_cyclone;
-int __init cyclone_setup(char *str)
+void __init cyclone_setup(void)
 {
 	use_cyclone = 1;
-	return 1;
 }
 
 static u32* volatile cyclone_timer;	/* Cyclone MPMC0 register */
...
@@ -130,6 +130,8 @@ static int cpe_poll_enabled = 1;
 
 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
 
+static int mca_init;
+
 /*
  * IA64_MCA log support
  */
@@ -542,7 +544,7 @@ ia64_mca_register_cpev (int cpev)
 	}
 
 	IA64_MCA_DEBUG("%s: corrected platform error "
-		       "vector %#x setup and enabled\n", __FUNCTION__, cpev);
+		       "vector %#x registered\n", __FUNCTION__, cpev);
 }
 
 #endif /* CONFIG_ACPI */
@@ -551,8 +553,9 @@ ia64_mca_register_cpev (int cpev)
 /*
  * ia64_mca_cmc_vector_setup
  *
- * Setup the corrected machine check vector register in the processor and
- * unmask interrupt.  This function is invoked on a per-processor basis.
+ * Setup the corrected machine check vector register in the processor.
+ * (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
+ * This function is invoked on a per-processor basis.
  *
  * Inputs
  *	None
@@ -566,12 +569,12 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv_reg_t	cmcv;
 
 	cmcv.cmcv_regval	= 0;
-	cmcv.cmcv_mask		= 0;	/* Unmask/enable interrupt */
+	cmcv.cmcv_mask		= 1;	/* Mask/disable interrupt at first */
 	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
 	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x setup and enabled.\n",
+		       "machine check vector %#x registered.\n",
 		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
 
 	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
@@ -1293,7 +1296,7 @@ ia64_mca_init(void)
 	 */
 	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
 	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
-	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP & enable */
+	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
 
 	/* Setup the MCA rendezvous interrupt vector */
 	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
@@ -1303,23 +1306,8 @@ ia64_mca_init(void)
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
-	{
-		irq_desc_t *desc;
-		unsigned int irq;
-
-		cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
-		if (cpe_vector >= 0) {
-			for (irq = 0; irq < NR_IRQS; ++irq)
-				if (irq_to_vector(irq) == cpe_vector) {
-					desc = irq_descp(irq);
-					desc->status |= IRQ_PER_CPU;
-					setup_irq(irq, &mca_cpe_irqaction);
-				}
-			ia64_mca_register_cpev(cpe_vector);
-		}
-		register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
-	}
+	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
 
 	/* Initialize the areas set aside by the OS to buffer the
@@ -1331,6 +1319,7 @@ ia64_mca_init(void)
 	ia64_log_init(SAL_INFO_TYPE_CMC);
 	ia64_log_init(SAL_INFO_TYPE_CPE);
 
+	mca_init = 1;
 	printk(KERN_INFO "MCA related initialization done\n");
 }
@@ -1347,21 +1336,46 @@ ia64_mca_init(void)
 static int __init
 ia64_mca_late_init(void)
 {
+	if (!mca_init)
+		return 0;
+
+	/* Setup the CMCI/P vector and handler */
 	init_timer(&cmc_poll_timer);
 	cmc_poll_timer.function = ia64_mca_cmc_poll;
 
-	/* Reset to the correct state */
+	/* Unmask/enable the vector */
 	cmc_polling_enabled = 0;
+	schedule_work(&cmc_enable_work);
+
+	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
 
+#ifdef CONFIG_ACPI
+	/* Setup the CPEI/P vector and handler */
 	init_timer(&cpe_poll_timer);
 	cpe_poll_timer.function = ia64_mca_cpe_poll;
 
-#ifdef CONFIG_ACPI
-	/* If platform doesn't support CPEI, get the timer going. */
-	if (cpe_vector < 0 && cpe_poll_enabled) {
-		ia64_mca_cpe_poll(0UL);
-	} else {
-		cpe_poll_enabled = 0;
+	{
+		irq_desc_t *desc;
+		unsigned int irq;
+
+		if (cpe_vector >= 0) {
+			/* If platform supports CPEI, enable the irq. */
+			cpe_poll_enabled = 0;
+			for (irq = 0; irq < NR_IRQS; ++irq)
+				if (irq_to_vector(irq) == cpe_vector) {
+					desc = irq_descp(irq);
+					desc->status |= IRQ_PER_CPU;
+					setup_irq(irq, &mca_cpe_irqaction);
+				}
+			ia64_mca_register_cpev(cpe_vector);
+			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
+		} else {
+			/* If platform doesn't support CPEI, get the timer going. */
+			if (cpe_poll_enabled) {
+				ia64_mca_cpe_poll(0UL);
+				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+			}
+		}
 	}
 #endif
...
@@ -375,9 +375,10 @@ setup_arch (char **cmdline_p)
 	}
 #endif
 
-	/* enable IA-64 Machine Check Abort Handling */
-	ia64_mca_init();
+	/* enable IA-64 Machine Check Abort Handling unless disabled */
+	if (!strstr(saved_command_line, "nomca"))
+		ia64_mca_init();
 
 	platform_setup(cmdline_p);
 	paging_init();
 }
...
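The setup_arch() hunk above makes MCA initialization optional: ia64_mca_init() is skipped when "nomca" appears anywhere on the kernel command line, using a plain strstr() scan of saved_command_line. A small user-space sketch of the same test; the sample command line is made up for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Stand-in for the kernel's saved_command_line. */
	const char *cmdline = "root=/dev/sda2 console=ttyS0 nomca";

	/* Same check as the patch: only initialize MCA when "nomca" is absent. */
	if (!strstr(cmdline, "nomca"))
		printf("would call ia64_mca_init()\n");
	else
		printf("MCA handling disabled by \"nomca\"\n");
	return 0;
}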
@@ -299,7 +299,7 @@ smp_callin (void)
 
 	smp_setup_percpu_timer();
 
-	ia64_mca_cmc_vector_setup();	/* Setup vector on AP & enable */
+	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */
 
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
...
@@ -37,7 +37,7 @@ nasid_t master_nasid = INVALID_NASID;	/* This is the partition master nasid */
  *
  * This code is executed once for each Hub chip.
  */
-static void
+static void __init
 per_hub_init(cnodeid_t cnode)
 {
 	nasid_t nasid;
@@ -130,10 +130,8 @@ sgi_master_io_infr_init(void)
 	klhwg_add_all_modules(hwgraph_root);
 	klhwg_add_all_nodes(hwgraph_root);
 
-	for (cnode = 0; cnode < numionodes; cnode++) {
-		extern void per_hub_init(cnodeid_t);
+	for (cnode = 0; cnode < numionodes; cnode++)
 		per_hub_init(cnode);
-	}
 
 	/*
 	 *
...
@@ -188,7 +188,7 @@ sn_irq_desc(unsigned int irq)
 }
 
 u8
-sn_irq_to_vector(u8 irq)
+sn_irq_to_vector(unsigned int irq)
 {
 	return(irq);
 }
...
@@ -3,10 +3,10 @@
 
 #ifdef	CONFIG_IA64_CYCLONE
 extern int use_cyclone;
-extern int __init cyclone_setup(char*);
+extern void __init cyclone_setup(void);
 #else	/* CONFIG_IA64_CYCLONE */
 #define use_cyclone 0
-static inline void cyclone_setup(char* s)
+static inline void cyclone_setup(void)
 {
 	printk(KERN_ERR "Cyclone Counter: System not configured"
 			" w/ CONFIG_IA64_CYCLONE.\n");
...
@@ -29,8 +29,8 @@ typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
 typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
 typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
 typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
-typedef u8 ia64_mv_irq_to_vector (u8);
-typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector);
+typedef u8 ia64_mv_irq_to_vector (unsigned int);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
...
@@ -4,6 +4,9 @@
 
 #ifdef CONFIG_IA64_DIG
 /* Max 8 Nodes */
 #define NODES_SHIFT	3
+#elif defined(CONFIG_IA64_HP_ZX1)
+/* Max 32 Nodes */
+#define NODES_SHIFT	5
 #elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
 /* Max 256 Nodes */
 #define NODES_SHIFT	8
...
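NODES_SHIFT is the log2 of the per-configuration node limit, so the new HP zx1 value of 5 allows up to 1 << 5 = 32 NUMA nodes, matching the "Max 32 Nodes" comment (the shifts 3, 5 and 8 correspond to the 8-, 32- and 256-node limits listed above). A trivial check of that arithmetic, using the zx1 value added here; MAX_NODES is a hypothetical name for the derived limit, not the kernel's macro.

#include <stdio.h>

#define NODES_SHIFT	5			/* value added for CONFIG_IA64_HP_ZX1 */
#define MAX_NODES	(1 << NODES_SHIFT)	/* hypothetical name for the derived limit */

int main(void)
{
	printf("NODES_SHIFT=%d -> up to %d nodes\n", NODES_SHIFT, MAX_NODES);
	return 0;
}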