Commit 4042fd20 authored by Linus Torvalds

Merge http://lia64.bkbits.net/linux-ia64-release-2.6.10

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents e5fb5549 fbeac694
@@ -42,8 +42,7 @@ $(error Sorry, your compiler is too old. GCC v2.96 is known to generate bad cod
 endif
 ifeq ($(GCC_VERSION),0304)
-# Workaround Itanium 1 bugs in gcc 3.4.
-#       cflags-$(CONFIG_ITANIUM)        += -mtune=merced
+        cflags-$(CONFIG_ITANIUM)        += -mtune=merced
         cflags-$(CONFIG_MCKINLEY)       += -mtune=mckinley
 endif
...
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
+#include <linux/nodemask.h>
 #include <linux/bitops.h>         /* hweight64() */
 #include <asm/delay.h>            /* ia64_get_itc() */
...
@@ -559,6 +559,10 @@ struct user_regs_struct32 {
 extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *);
 extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *);
 
+/* Prototypes for use in sys_ia32.c */
+int copy_siginfo_to_user32 (siginfo_t32 __user *to, siginfo_t *from);
+int copy_siginfo_from_user32 (siginfo_t *to, siginfo_t32 __user *from);
+
 #endif /* !CONFIG_IA32_SUPPORT */
 #endif /* _ASM_IA64_IA32_PRIV_H */
@@ -119,6 +119,14 @@ static int __devinit cpu_to_phys_group(int cpu)
  */
 static DEFINE_PER_CPU(struct sched_domain, node_domains);
 static struct sched_group *sched_group_nodes[MAX_NUMNODES];
+
+static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static struct sched_group sched_group_allnodes[MAX_NUMNODES];
+
+static int __devinit cpu_to_allnodes_group(int cpu)
+{
+        return cpu_to_node(cpu);
+}
 #endif
 
 /*
@@ -149,9 +157,21 @@ void __devinit arch_init_sched_domains(void)
                cpus_and(nodemask, nodemask, cpu_default_map);
 
 #ifdef CONFIG_NUMA
+                if (num_online_cpus()
+                                > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+                        sd = &per_cpu(allnodes_domains, i);
+                        *sd = SD_ALLNODES_INIT;
+                        sd->span = cpu_default_map;
+                        group = cpu_to_allnodes_group(i);
+                        sd->groups = &sched_group_allnodes[group];
+                        p = sd;
+                } else
+                        p = NULL;
+
                sd = &per_cpu(node_domains, i);
                *sd = SD_NODE_INIT;
                sd->span = sched_domain_node_span(node);
+                sd->parent = p;
                cpus_and(sd->span, sd->span, cpu_default_map);
 #endif
@@ -201,6 +221,9 @@ void __devinit arch_init_sched_domains(void)
        }
 
 #ifdef CONFIG_NUMA
+        init_sched_build_groups(sched_group_allnodes, cpu_default_map,
+                                &cpu_to_allnodes_group);
+
        for (i = 0; i < MAX_NUMNODES; i++) {
                /* Set up node groups */
                struct sched_group *sg, *prev;
@@ -282,6 +305,15 @@ void __devinit arch_init_sched_domains(void)
                power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
                        (cpus_weight(sd->groups->cpumask)-1) / 10;
                sd->groups->cpu_power = power;
+
+#ifdef CONFIG_NUMA
+                sd = &per_cpu(allnodes_domains, i);
+                if (sd->groups) {
+                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
+                                (cpus_weight(sd->groups->cpumask)-1) / 10;
+                        sd->groups->cpu_power = power;
+                }
+#endif
        }
 
 #ifdef CONFIG_NUMA
...
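For a feel of when the extra "allnodes" level kicks in: the hunk above only builds it when the online CPU count exceeds SD_NODES_PER_DOMAIN nodes' worth of CPUs, and otherwise leaves the per-node domains at the top. A standalone C sketch of that decision (not part of the patch; SD_NODES_PER_DOMAIN's real value and the CPU counts below are placeholders):

#include <stdio.h>

#define SD_NODES_PER_DOMAIN 6           /* placeholder value, for illustration only */

int main(void)
{
        int online_cpus   = 64;         /* hypothetical 16-node machine ... */
        int cpus_per_node = 4;          /* ... standing in for cpus_weight(nodemask) */

        if (online_cpus > SD_NODES_PER_DOMAIN * cpus_per_node)
                printf("big system: node domains get an allnodes parent level\n");
        else
                printf("small system: node domains stay at the top level\n");
        return 0;
}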
@@ -593,7 +593,7 @@ ia64_mca_cmc_vector_disable (void *dummy)
 {
        cmcv_reg_t      cmcv;
 
-       cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
+       cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
        cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
@@ -620,7 +620,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 {
        cmcv_reg_t      cmcv;
 
-       cmcv = (cmcv_reg_t)ia64_getreg(_IA64_REG_CR_CMCV);
+       cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
        cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
...
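The two hunks above stop casting the raw ia64_getreg() result to cmcv_reg_t and instead load it through the regval member before flipping the mask bit. A standalone illustration of that raw-value/bitfield union pattern (the type name, field names and widths here are invented, not the kernel's actual cmcv_reg_t layout):

#include <stdint.h>
#include <stdio.h>

typedef union {
        uint64_t regval;                /* raw 64-bit register image */
        struct {
                uint64_t vector : 8;    /* field names/widths made up for the sketch */
                uint64_t unused : 55;
                uint64_t mask   : 1;    /* 1 = interrupt masked */
        } bits;
} fake_cmcv_reg_t;

int main(void)
{
        fake_cmcv_reg_t cmcv;

        cmcv.regval = 0xd0;             /* pretend this came from the control register */
        cmcv.bits.mask = 1;             /* flip one field, keep everything else intact */
        printf("value to write back: %#llx\n", (unsigned long long)cmcv.regval);
        return 0;
}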
@@ -1269,7 +1269,6 @@ script_new (unsigned long ip)
 {
        struct unw_script *script, *prev, *tmp;
        unw_hash_index_t index;
-       unsigned long flags;
        unsigned short head;
 
        STAT(++unw.stat.script.news);
@@ -1278,13 +1277,9 @@ script_new (unsigned long ip)
         * Can't (easily) use cmpxchg() here because of ABA problem
         * that is intrinsic in cmpxchg()...
         */
-       spin_lock_irqsave(&unw.lock, flags);
-       {
-               head = unw.lru_head;
-               script = unw.cache + head;
-               unw.lru_head = script->lru_chain;
-       }
-       spin_unlock(&unw.lock);
+       head = unw.lru_head;
+       script = unw.cache + head;
+       unw.lru_head = script->lru_chain;
 
        /*
         * We'd deadlock here if we interrupted a thread that is holding a read lock on
@@ -1295,43 +1290,39 @@ script_new (unsigned long ip)
        if (!write_trylock(&script->lock))
                return NULL;
 
-       spin_lock(&unw.lock);
-       {
-               /* re-insert script at the tail of the LRU chain: */
-               unw.cache[unw.lru_tail].lru_chain = head;
-               unw.lru_tail = head;
-
-               /* remove the old script from the hash table (if it's there): */
-               if (script->ip) {
-                       index = hash(script->ip);
-                       tmp = unw.cache + unw.hash[index];
-                       prev = NULL;
-                       while (1) {
-                               if (tmp == script) {
-                                       if (prev)
-                                               prev->coll_chain = tmp->coll_chain;
-                                       else
-                                               unw.hash[index] = tmp->coll_chain;
-                                       break;
-                               } else
-                                       prev = tmp;
-                               if (tmp->coll_chain >= UNW_CACHE_SIZE)
-                                       /* old script wasn't in the hash-table */
-                                       break;
-                               tmp = unw.cache + tmp->coll_chain;
-                       }
-               }
-
-               /* enter new script in the hash table */
-               index = hash(ip);
-               script->coll_chain = unw.hash[index];
-               unw.hash[index] = script - unw.cache;
-
-               script->ip = ip;        /* set new IP while we're holding the locks */
-
-               STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
-       }
-       spin_unlock_irqrestore(&unw.lock, flags);
+       /* re-insert script at the tail of the LRU chain: */
+       unw.cache[unw.lru_tail].lru_chain = head;
+       unw.lru_tail = head;
+
+       /* remove the old script from the hash table (if it's there): */
+       if (script->ip) {
+               index = hash(script->ip);
+               tmp = unw.cache + unw.hash[index];
+               prev = NULL;
+               while (1) {
+                       if (tmp == script) {
+                               if (prev)
+                                       prev->coll_chain = tmp->coll_chain;
+                               else
+                                       unw.hash[index] = tmp->coll_chain;
+                               break;
+                       } else
+                               prev = tmp;
+                       if (tmp->coll_chain >= UNW_CACHE_SIZE)
+                               /* old script wasn't in the hash-table */
+                               break;
+                       tmp = unw.cache + tmp->coll_chain;
+               }
+       }
+
+       /* enter new script in the hash table */
+       index = hash(ip);
+       script->coll_chain = unw.hash[index];
+       unw.hash[index] = script - unw.cache;
+
+       script->ip = ip;        /* set new IP while we're holding the locks */
+
+       STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
 
        script->flags = 0;
        script->hint = 0;
@@ -1830,6 +1821,7 @@ find_save_locs (struct unw_frame_info *info)
 {
        int have_write_lock = 0;
        struct unw_script *scr;
+       unsigned long flags = 0;
 
        if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
                /* don't let obviously bad addresses pollute the cache */
@@ -1841,8 +1833,10 @@ find_save_locs (struct unw_frame_info *info)
        scr = script_lookup(info);
        if (!scr) {
+               spin_lock_irqsave(&unw.lock, flags);
                scr = build_script(info);
                if (!scr) {
+                       spin_unlock_irqrestore(&unw.lock, flags);
                        UNW_DPRINT(0,
                                   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
                                   __FUNCTION__, info->ip);
@@ -1855,9 +1849,10 @@ find_save_locs (struct unw_frame_info *info)
        run_script(scr, info);
 
-       if (have_write_lock)
+       if (have_write_lock) {
                write_unlock(&scr->lock);
-       else
+               spin_unlock_irqrestore(&unw.lock, flags);
+       } else
                read_unlock(&scr->lock);
        return 0;
 }
...
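The net effect of the unwind hunks is that unw.lock is no longer taken inside script_new(); find_save_locs() now takes it with IRQs saved around build_script() and drops it together with the script's write lock. A minimal pthread sketch of that caller-holds-the-lock convention (illustrative only, not the kernel's locking API):

#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs with cache_lock already held by the caller, as script_new() now does. */
static int insert_locked(int key)
{
        /* ...update the LRU chain and hash table here... */
        return key;
}

static int lookup_or_build(int key)
{
        int scr;

        pthread_mutex_lock(&cache_lock);        /* caller brackets the whole build step */
        scr = insert_locked(key);
        pthread_mutex_unlock(&cache_lock);
        return scr;
}

int main(void)
{
        return lookup_or_build(0);
}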
@@ -305,8 +305,15 @@ swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handl
                flags |= GFP_DMA;
 
        ret = (void *)__get_free_pages(flags, get_order(size));
-       if (!ret)
-               return NULL;
+       if (!ret) {
+               /* DMA_FROM_DEVICE is to avoid the memcpy in map_single */
+               dma_addr_t handle;
+               handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
+               if (dma_mapping_error(handle))
+                       return NULL;
+
+               ret = phys_to_virt(handle);
+       }
 
        memset(ret, 0, size);
        dev_addr = virt_to_phys(ret);
@@ -319,7 +326,12 @@ swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handl
 void
 swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-       free_pages((unsigned long) vaddr, get_order(size));
+       if (!(vaddr >= (void *)io_tlb_start
+             && vaddr < (void *)io_tlb_end))
+               free_pages((unsigned long) vaddr, get_order(size));
+       else
+               /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+               swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }
 
 static void swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
...
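The swiotlb hunks make the coherent-allocation path fall back to the bounce pool when the page allocator fails, so the free path now has to check where a buffer came from before returning it. A rough userspace sketch of that free-path test (the static pool below stands in for the io_tlb_start/io_tlb_end aperture; nothing here is the kernel's actual code):

#include <stdint.h>
#include <stdlib.h>

static char bounce_pool[1 << 16];       /* stand-in for the swiotlb aperture */

static void free_coherent_sketch(void *vaddr)
{
        uintptr_t a  = (uintptr_t)vaddr;
        uintptr_t lo = (uintptr_t)bounce_pool;
        uintptr_t hi = lo + sizeof(bounce_pool);

        if (a >= lo && a < hi) {
                /* came from the bounce pool: hand it back through the pool
                 * (swiotlb_unmap_single() in the real code) */
        } else {
                free(vaddr);            /* ordinary allocation: normal free path */
        }
}

int main(void)
{
        free_coherent_sketch(malloc(64));       /* takes the free() branch */
        free_coherent_sketch(bounce_pool + 16); /* takes the bounce-pool branch */
        return 0;
}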
@@ -681,7 +681,7 @@ void paging_init(void)
                        PAGE_ALIGN(max_low_pfn * sizeof(struct page));
                vmem_map = (struct page *) vmalloc_end;
 
-               efi_memmap_walk(create_mem_map_page_table, 0);
+               efi_memmap_walk(create_mem_map_page_table, NULL);
                printk("Virtual mem_map starts at 0x%p\n", vmem_map);
        }
...
@@ -70,7 +70,7 @@ static int __init topology_init(void)
        memset(sysfs_cpus, 0, sizeof(struct cpu) * NR_CPUS);
 
        for (i = 0; i < numnodes; i++)
-               if ((err = register_node(&sysfs_nodes[i], i, 0)))
+               if ((err = register_node(&sysfs_nodes[i], i, NULL)))
                        goto out;
 
        for (i = 0; i < NR_CPUS; i++)
...
@@ -275,7 +275,7 @@ add_window (struct acpi_resource *res, void *data)
                return AE_OK;
 
        window = &info->controller->window[info->controller->windows++];
-       window->resource.flags |= flags;
+       window->resource.flags = flags;
        window->resource.start = addr.min_address_range;
        window->resource.end = addr.max_address_range;
        window->offset = offset;
...
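The switch from "|=" to "=" matters if the window slot is not guaranteed to start out zeroed: OR-ing merges the new flags with whatever bits were already there, while plain assignment makes the result exactly the computed flags. A tiny standalone demonstration (struct and values are made up):

#include <stdio.h>
#include <string.h>

struct window { unsigned long flags; };

int main(void)
{
        struct window w;
        unsigned long new_flags = 0x200;        /* made-up resource flag */

        memset(&w, 0xff, sizeof(w));            /* simulate a slot with stale contents */
        w.flags = new_flags;                    /* assignment: result is exactly new_flags */
        printf("assign: %#lx\n", w.flags);

        memset(&w, 0xff, sizeof(w));
        w.flags |= new_flags;                   /* OR: the stale bits survive */
        printf("or-in:  %#lx\n", w.flags);
        return 0;
}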
@@ -13,7 +13,7 @@
 extern struct sn_irq_info **sn_irq;
 
 #define SN_PCIDEV_INFO(pci_dev) \
-        ((struct pcidev_info *)((pci_dev)->sysdata))
+        ((struct pcidev_info *)(pci_dev)->sysdata)
 
 /*
  * Given a pci_bus, return the sn pcibus_bussoft struct.  Note that
@@ -48,4 +48,7 @@ struct pcidev_info {
        struct sn_irq_info      *pdi_sn_irq_info;
 };
 
+extern void sn_irq_fixup(struct pci_dev *pci_dev,
+                        struct sn_irq_info *sn_irq_info);
+
 #endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
@@ -56,7 +56,7 @@
  */
 
 #define PIC_ATE_TARGETID_SHFT           8
-#define PIC_HOST_INTR_ADDR              0x0000FFFFFFFFFFFF
+#define PIC_HOST_INTR_ADDR              0x0000FFFFFFFFFFFFUL
 #define PIC_PCI64_ATTR_TARG_SHFT        60
...
@@ -8,7 +8,7 @@
 #ifndef _ASM_IA64_SN_PCI_TIOCP_H
 #define _ASM_IA64_SN_PCI_TIOCP_H
 
-#define TIOCP_HOST_INTR_ADDR            0x003FFFFFFFFFFFFF
+#define TIOCP_HOST_INTR_ADDR            0x003FFFFFFFFFFFFFUL
 #define TIOCP_PCI64_CMDTYPE_MEM         (0x1ull << 60)
...
@@ -23,17 +23,17 @@
  *                 SH_JUNK_BUS_LED0-3, defined in shub_mmr.h
  *
  */
-#define SH_REAL_JUNK_BUS_LED0   0x7fed00000
-#define SH_REAL_JUNK_BUS_LED1   0x7fed10000
-#define SH_REAL_JUNK_BUS_LED2   0x7fed20000
-#define SH_REAL_JUNK_BUS_LED3   0x7fed30000
-#define SH_JUNK_BUS_UART0       0x7fed40000
-#define SH_JUNK_BUS_UART1       0x7fed40008
-#define SH_JUNK_BUS_UART2       0x7fed40010
-#define SH_JUNK_BUS_UART3       0x7fed40018
-#define SH_JUNK_BUS_UART4       0x7fed40020
-#define SH_JUNK_BUS_UART5       0x7fed40028
-#define SH_JUNK_BUS_UART6       0x7fed40030
-#define SH_JUNK_BUS_UART7       0x7fed40038
+#define SH_REAL_JUNK_BUS_LED0   0x7fed00000UL
+#define SH_REAL_JUNK_BUS_LED1   0x7fed10000UL
+#define SH_REAL_JUNK_BUS_LED2   0x7fed20000UL
+#define SH_REAL_JUNK_BUS_LED3   0x7fed30000UL
+#define SH_JUNK_BUS_UART0       0x7fed40000UL
+#define SH_JUNK_BUS_UART1       0x7fed40008UL
+#define SH_JUNK_BUS_UART2       0x7fed40010UL
+#define SH_JUNK_BUS_UART3       0x7fed40018UL
+#define SH_JUNK_BUS_UART4       0x7fed40020UL
+#define SH_JUNK_BUS_UART5       0x7fed40028UL
+#define SH_JUNK_BUS_UART6       0x7fed40030UL
+#define SH_JUNK_BUS_UART7       0x7fed40038UL
 
 #endif /* _ASM_IA64_SN_SHUB_H */
@@ -200,8 +200,6 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
        struct sn_irq_info *sn_irq_info;
        struct pci_dev *host_pci_dev;
        int status = 0;
-       extern void sn_irq_fixup(struct pci_dev *pci_dev,
-                                struct sn_irq_info *sn_irq_info);
 
        SN_PCIDEV_INFO(dev) = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
        if (SN_PCIDEV_INFO(dev) <= 0)
...
@@ -27,7 +27,7 @@ void *sn_io_addr(unsigned long port)
        if (!IS_RUNNING_ON_SIMULATOR()) {
                /* On sn2, legacy I/O ports don't point at anything */
                if (port < (64 * 1024))
-                       return 0;
+                       return NULL;
                return ((void *)(port | __IA64_UNCACHED_OFFSET));
        } else {
                /* but the simulator uses them... */
@@ -41,9 +41,8 @@ void *sn_io_addr(unsigned long port)
                 */
                if ((port >= 0x1f0 && port <= 0x1f7) ||
                    port == 0x3f6 || port == 0x3f7) {
-                       io_base =
-                           (0xc000000fcc000000 |
-                            ((unsigned long)get_nasid() << 38));
+                       io_base = (0xc000000fcc000000UL |
+                                  ((unsigned long)get_nasid() << 38));
                        addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
                } else {
                        addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
@@ -69,11 +68,10 @@ EXPORT_SYMBOL(sn_io_addr);
  */
 void sn_mmiob(void)
 {
-       while ((((volatile unsigned long)(*pda->
-                                         pio_write_status_addr)) &
+       while ((((volatile unsigned long)(*pda->pio_write_status_addr)) &
                SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
               SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
-               udelay(1);
+               cpu_relax();
 }
 
 EXPORT_SYMBOL(sn_mmiob);
@@ -90,7 +90,7 @@ int numionodes;
  * early_printk won't try to access the UART before
  * master_node_bedrock_address is properly calculated.
  */
-u64 master_node_bedrock_address;
+u64 __iomem *master_node_bedrock_address;
 
 static void sn_init_pdas(char **);
 static void scan_for_ionodes(void);
@@ -196,10 +196,10 @@ void __init early_sn_setup(void)
        }
 
        if (IS_RUNNING_ON_SIMULATOR()) {
-               master_node_bedrock_address =
-                   (u64) REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
-               printk(KERN_DEBUG
-                      "early_sn_setup: setting master_node_bedrock_address to 0x%lx\n",
+               master_node_bedrock_address = (u64 __iomem *)
+                       REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
+               printk(KERN_DEBUG "early_sn_setup: setting "
+                      "master_node_bedrock_address to 0x%p\n",
                       master_node_bedrock_address);
        }
 }
@@ -313,10 +313,10 @@ void __init sn_setup(char **cmdline_p)
        platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
 
        if (IS_RUNNING_ON_SIMULATOR()) {
-               master_node_bedrock_address =
-                   (u64) REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
-               printk(KERN_DEBUG
-                      "sn_setup: setting master_node_bedrock_address to 0x%lx\n",
+               master_node_bedrock_address = (u64 __iomem *)
+                       REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
+               printk(KERN_DEBUG "sn_setup: setting "
+                      "master_node_bedrock_address to 0x%p\n",
                       master_node_bedrock_address);
        }
 
...
@@ -58,42 +58,42 @@ void __sn_outl(unsigned int val, unsigned long port)
        ___sn_outl(val, port);
 }
 
-unsigned char __sn_readb(void *addr)
+unsigned char __sn_readb(void __iomem *addr)
 {
        return ___sn_readb(addr);
 }
 
-unsigned short __sn_readw(void *addr)
+unsigned short __sn_readw(void __iomem *addr)
 {
        return ___sn_readw(addr);
 }
 
-unsigned int __sn_readl(void *addr)
+unsigned int __sn_readl(void __iomem *addr)
 {
        return ___sn_readl(addr);
 }
 
-unsigned long __sn_readq(void *addr)
+unsigned long __sn_readq(void __iomem *addr)
 {
        return ___sn_readq(addr);
 }
 
-unsigned char __sn_readb_relaxed(void *addr)
+unsigned char __sn_readb_relaxed(void __iomem *addr)
 {
        return ___sn_readb_relaxed(addr);
 }
 
-unsigned short __sn_readw_relaxed(void *addr)
+unsigned short __sn_readw_relaxed(void __iomem *addr)
 {
        return ___sn_readw_relaxed(addr);
 }
 
-unsigned int __sn_readl_relaxed(void *addr)
+unsigned int __sn_readl_relaxed(void __iomem *addr)
 {
        return ___sn_readl_relaxed(addr);
 }
 
-unsigned long __sn_readq_relaxed(void *addr)
+unsigned long __sn_readq_relaxed(void __iomem *addr)
 {
        return ___sn_readq_relaxed(addr);
 }
...
@@ -91,122 +91,11 @@ static const char *fit_type_name(unsigned char type)
        return "Unknown type";
 }
 
-/* ============ BEGIN temp til old PROMs are no longer supported =============
- *
- * The OS should not make direct access to the PROM flash memory. Access to
- * this region must be serialized with a PROM lock. If SAL on one cpu is
- * updating the FLASH error log at the same time another cpu is accessing the
- * PROM, data corruption will occur.
- *
- * To solve the problem, all flash PROM access has been moved to SAL. Because
- * not all systems will have instant PROM updates, we need to support a new OS
- * running on a system with old PROMs.
- *
- * This code should be deleted after 1 OS/PROM release has occurred & the OS
- * no longer supports downrev PROMs. (PROM support should be in the 3.50
- * PROMs).
- */
-#define SUPPORT_OLD_PROMS
-
-#ifdef SUPPORT_OLD_PROMS
-
-#define FIT_SIGNATURE          0x2020205f5449465ful
-
-/* Sub-regions determined by bits in Node Offset */
-#define LB_PROM_SPACE          0x0000000700000000ul    /* Local LB PROM */
-
-/* Offset of PROM banner pointers in SAL A and SAL B */
-#define SAL_A_BANNER_OFFSET    (1 * 16)
-#define SAL_B_BANNER_OFFSET    (3 * 16)
-
-/* Architected IA64 firmware space */
-#define FW_BASE                        0x00000000FF000000
-#define FW_TOP                 0x0000000100000000
-
-static unsigned long convert_fw_addr(nasid_t nasid, unsigned long addr)
-{
-       /* snag just the node-relative offset */
-       addr &= ~0ul >> (63 - 35);
-       /* the pointer to SAL A is relative to IA-64 compatibility
-        * space. However, the PROM is mapped at a different offset
-        * in MMR space (both local and global)
-        */
-       addr += 0x700000000;
-       return GLOBAL_MMR_ADDR(nasid, addr);
-}
-
-static int valid_fw_addr(unsigned long addr)
-{
-       addr &= ~(1ul << 63); /* Clear cached/uncached bit */
-       return (addr >= FW_BASE && addr < FW_TOP);
-}
-
-static unsigned long *
-lookup_fit(int nasid)
-{
-       unsigned long *fitp;
-       unsigned long fit_paddr;
-       unsigned long *fit_vaddr;
-
-       fitp = (void *)GLOBAL_MMR_ADDR(nasid, LB_PROM_SPACE - 32);
-       fit_paddr = readq(fitp);
-       fit_vaddr = (unsigned long *) convert_fw_addr(nasid, fit_paddr);
-
-       return fit_vaddr;
-}
-#endif /* SUPPORT_OLD_PROMS */
-/* ============ END temp til old PROMs are no longer supported ============= */
-
 static int
 get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
              char *banner, int banlen)
 {
-       int ret;
-
-       ret = ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
-
-#ifdef SUPPORT_OLD_PROMS
-       /* The following is hack is temporary until PROMs are updated */
-       if (ret == SALRET_NOT_IMPLEMENTED) {
-               unsigned long *fitadr = lookup_fit(nasid);
-               int nentries;
-
-               if (readq(fitadr) != FIT_SIGNATURE) {
-                       printk(KERN_WARNING "Unrecognized FIT signature");
-                       return -2;
-               }
-
-               nentries = (unsigned int) (readq(fitadr + 1) & 0xffffff);
-               if (index >= nentries)
-                       return -2;
-
-               fentry[0] = readq(fitadr + 2 * index);
-               fentry[1] = readq(fitadr + 2 * index + 1);
-               ret = 0;
-
-               if (banner && FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A) {
-                       unsigned long i, qw, *bwp, *qwp;
-
-                       banner[0] = '\0';
-                       qw = fentry[0]; /* Address of SAL A */
-                       if (!valid_fw_addr(qw))
-                               return 0;
-                       qw += SAL_A_BANNER_OFFSET;
-                       qw = convert_fw_addr(nasid, qw);
-
-                       qw = readq(qw); /* Address of banner */
-                       if (!valid_fw_addr(qw))
-                               return 0;
-                       qw = convert_fw_addr(nasid, qw);
-                       qwp = (unsigned long *) qw;
-                       bwp = (unsigned long *) banner;
-                       for (i=0; i<banlen/8; i++)
-                               bwp[i] = qwp[i];
-               }
-       }
-#endif /* SUPPORT_OLD_PROMS */
-
-       return ret;
+       return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
 }
...
@@ -36,18 +36,18 @@
 #include <asm/semaphore.h>
 #include <asm/segment.h>
 #include <asm/uaccess.h>
-#include <asm-ia64/sal.h>
-#include <asm-ia64/sn/sn_sal.h>
+#include <asm/sal.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
 #include <asm/sn/module.h>
 #include <asm/sn/geo.h>
-#include <asm-ia64/sn/sn2/sn_hwperf.h>
+#include <asm/sn/sn2/sn_hwperf.h>
 
 static void *sn_hwperf_salheap = NULL;
 static int sn_hwperf_obj_cnt = 0;
 static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
 static int sn_hwperf_init(void);
 static DECLARE_MUTEX(sn_hwperf_init_mutex);
-extern int numionodes;
 
 static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
 {
@@ -407,7 +407,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
                r = -EINVAL;
                goto error;
        }
 
-       r = copy_from_user(&a, (const void *)arg,
+       r = copy_from_user(&a, (const void __user *)arg,
                sizeof(struct sn_hwperf_ioctl_args));
        if (r != 0) {
                r = -EFAULT;
@@ -428,7 +428,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
        }
 
        if (op & SN_HWPERF_OP_MEM_COPYIN) {
-               r = copy_from_user(p, (const void *)a.ptr, a.sz);
+               r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
                if (r != 0) {
                        r = -EFAULT;
                        goto error;
@@ -528,7 +528,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
        }
 
        if (op & SN_HWPERF_OP_MEM_COPYOUT) {
-               r = copy_to_user((void *)a.ptr, p, a.sz);
+               r = copy_to_user((void __user *)a.ptr, p, a.sz);
                if (r != 0) {
                        r = -EFAULT;
                        goto error;
...
@@ -62,9 +62,14 @@ static int sn_force_interrupt_show(struct seq_file *s, void *p)
 }
 
 static ssize_t sn_force_interrupt_write_proc(struct file *file,
-               const __user char *buffer, size_t count, loff_t *data)
+               const char __user *buffer, size_t count, loff_t *data)
 {
-       sn_force_interrupt_flag = (*buffer == '0') ? 0 : 1;
+       char val;
+
+       if (copy_from_user(&val, buffer, 1))
+               return -EFAULT;
+
+       sn_force_interrupt_flag = (val == '0') ? 0 : 1;
        return count;
 }
@@ -116,7 +121,7 @@ void register_sn_procfs(void)
        struct proc_dir_entry *e;
 
        BUG_ON(sgi_proc_dir != NULL);
-       if (!(sgi_proc_dir = proc_mkdir("sgi_sn", 0)))
+       if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
                return;
 
        sn_procfs_create_entry("partition_id", sgi_proc_dir,
...
@@ -474,3 +474,4 @@ EXPORT_SYMBOL(sn_pci_unmap_sg);
 EXPORT_SYMBOL(sn_pci_alloc_consistent);
 EXPORT_SYMBOL(sn_pci_free_consistent);
 EXPORT_SYMBOL(sn_pci_dma_supported);
+EXPORT_SYMBOL(sn_dma_mapping_error);
@@ -133,6 +133,10 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
        } else
                pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
 
+       /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
+       if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
+               pci_addr |= PCI64_ATTR_VIRTUAL;
+
        return pci_addr;
 }
...
@@ -21,6 +21,7 @@
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <asm/sn/io.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/module.h>
 #include <asm/sn/geo.h>
...
@@ -108,7 +108,7 @@ static struct sn_cons_port sal_console_port;
 /* Only used if USE_DYNAMIC_MINOR is set to 1 */
 static struct miscdevice misc;  /* used with misc_register for dynamic */
 
-extern u64 master_node_bedrock_address;
+extern u64 __iomem *master_node_bedrock_address;
 extern void early_sn_setup(void);
 
 #undef DEBUG
...
@@ -62,14 +62,14 @@ typedef unsigned int ia64_mv_inl_t (unsigned long);
 typedef void ia64_mv_outb_t (unsigned char, unsigned long);
 typedef void ia64_mv_outw_t (unsigned short, unsigned long);
 typedef void ia64_mv_outl_t (unsigned int, unsigned long);
-typedef unsigned char ia64_mv_readb_t (void *);
-typedef unsigned short ia64_mv_readw_t (void *);
-typedef unsigned int ia64_mv_readl_t (void *);
-typedef unsigned long ia64_mv_readq_t (void *);
-typedef unsigned char ia64_mv_readb_relaxed_t (void *);
-typedef unsigned short ia64_mv_readw_relaxed_t (void *);
-typedef unsigned int ia64_mv_readl_relaxed_t (void *);
-typedef unsigned long ia64_mv_readq_relaxed_t (void *);
+typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
+typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
 
 static inline void
 machvec_noop (void)
...
@@ -69,27 +69,27 @@ typedef union ia64_sn2_pa {
 } ia64_sn2_pa_t;
 #endif
 
-#define TO_PHYS_MASK           0x0001ffcfffffffff      /* Note - clear AS bits */
+#define TO_PHYS_MASK           0x0001ffcfffffffffUL    /* Note - clear AS bits */
 
 /* Regions determined by AS */
-#define LOCAL_MMR_SPACE                0xc000008000000000      /* Local MMR space */
-#define LOCAL_PHYS_MMR_SPACE   0x8000008000000000      /* Local PhysicalMMR space */
-#define LOCAL_MEM_SPACE                0xc000010000000000      /* Local Memory space */
+#define LOCAL_MMR_SPACE                0xc000008000000000UL    /* Local MMR space */
+#define LOCAL_PHYS_MMR_SPACE   0x8000008000000000UL    /* Local PhysicalMMR space */
+#define LOCAL_MEM_SPACE                0xc000010000000000UL    /* Local Memory space */
 /* It so happens that setting bit 35 indicates a reference to the SHUB or TIO
  * MMR space.
  */
-#define GLOBAL_MMR_SPACE       0xc000000800000000      /* Global MMR space */
-#define TIO_MMR_SPACE          0xc000000800000000      /* TIO MMR space */
-#define ICE_MMR_SPACE          0xc000000000000000      /* ICE MMR space */
-#define GLOBAL_PHYS_MMR_SPACE  0x0000000800000000      /* Global Physical MMR space */
-#define GET_SPACE              0xe000001000000000      /* GET space */
-#define AMO_SPACE              0xc000002000000000      /* AMO space */
-#define CACHEABLE_MEM_SPACE    0xe000003000000000      /* Cacheable memory space */
-#define UNCACHED               0xc000000000000000      /* UnCacheable memory space */
-#define UNCACHED_PHYS          0x8000000000000000      /* UnCacheable physical memory space */
-#define PHYS_MEM_SPACE         0x0000003000000000      /* physical memory space */
+#define GLOBAL_MMR_SPACE       0xc000000800000000UL    /* Global MMR space */
+#define TIO_MMR_SPACE          0xc000000800000000UL    /* TIO MMR space */
+#define ICE_MMR_SPACE          0xc000000000000000UL    /* ICE MMR space */
+#define GLOBAL_PHYS_MMR_SPACE  0x0000000800000000UL    /* Global Physical MMR space */
+#define GET_SPACE              0xe000001000000000UL    /* GET space */
+#define AMO_SPACE              0xc000002000000000UL    /* AMO space */
+#define CACHEABLE_MEM_SPACE    0xe000003000000000UL    /* Cacheable memory space */
+#define UNCACHED               0xc000000000000000UL    /* UnCacheable memory space */
+#define UNCACHED_PHYS          0x8000000000000000UL    /* UnCacheable physical memory space */
+#define PHYS_MEM_SPACE         0x0000003000000000UL    /* physical memory space */
 
 /* SN2 address macros */
 /* NID_SHFT has the right value for both SHUB and TIO addresses.*/
@@ -105,7 +105,7 @@ typedef union ia64_sn2_pa {
 #define GLOBAL_MEM_ADDR(n,a)   (CACHEABLE_MEM_SPACE | REMOTE_ADDR(n,a))
 
 /* non-II mmr's start at top of big window space (4G) */
-#define BWIN_TOP               0x0000000100000000
+#define BWIN_TOP               0x0000000100000000UL
 
 /*
  * general address defines - for code common to SN0/SN1/SN2
@@ -256,7 +256,7 @@ typedef union ia64_sn2_pa {
                               (((~(_x)) & BWIN_TOP)>>9) | (_x))
 
 #define REMOTE_HUB(_n, _x)     \
-       ((volatile uint64_t *)(REMOTE_HUB_BASE(_x) | ((((long)(_n))<<NASID_SHFT))))
+       ((uint64_t *)(REMOTE_HUB_BASE(_x) | ((((long)(_n))<<NASID_SHFT))))
 
 /*
...
@@ -129,44 +129,44 @@ ___sn_outl (unsigned int val, unsigned long port)
  */
 
 static inline unsigned char
-___sn_readb (void *addr)
+___sn_readb (const volatile void __iomem *addr)
 {
        unsigned char val;
 
-       val = *(volatile unsigned char *)addr;
+       val = *(volatile unsigned char __force *)addr;
        __sn_mf_a();
        sn_dma_flush((unsigned long)addr);
        return val;
 }
 
 static inline unsigned short
-___sn_readw (void *addr)
+___sn_readw (const volatile void __iomem *addr)
 {
        unsigned short val;
 
-       val = *(volatile unsigned short *)addr;
+       val = *(volatile unsigned short __force *)addr;
        __sn_mf_a();
        sn_dma_flush((unsigned long)addr);
        return val;
 }
 
 static inline unsigned int
-___sn_readl (void *addr)
+___sn_readl (const volatile void __iomem *addr)
 {
        unsigned int val;
 
-       val = *(volatile unsigned int *) addr;
+       val = *(volatile unsigned int __force *)addr;
        __sn_mf_a();
        sn_dma_flush((unsigned long)addr);
        return val;
 }
 
 static inline unsigned long
-___sn_readq (void *addr)
+___sn_readq (const volatile void __iomem *addr)
 {
        unsigned long val;
 
-       val = *(volatile unsigned long *) addr;
+       val = *(volatile unsigned long __force *)addr;
        __sn_mf_a();
        sn_dma_flush((unsigned long)addr);
        return val;
@@ -215,27 +215,27 @@ sn_inl_fast (unsigned long port)
 }
 
 static inline unsigned char
-___sn_readb_relaxed (void *addr)
+___sn_readb_relaxed (const volatile void __iomem *addr)
 {
-       return *(volatile unsigned char *)addr;
+       return *(volatile unsigned char __force *)addr;
 }
 
 static inline unsigned short
-___sn_readw_relaxed (void *addr)
+___sn_readw_relaxed (const volatile void __iomem *addr)
 {
-       return *(volatile unsigned short *)addr;
+       return *(volatile unsigned short __force *)addr;
 }
 
 static inline unsigned int
-___sn_readl_relaxed (void *addr)
+___sn_readl_relaxed (const volatile void __iomem *addr)
 {
-       return *(volatile unsigned int *) addr;
+       return *(volatile unsigned int __force *) addr;
 }
 
 static inline unsigned long
-___sn_readq_relaxed (void *addr)
+___sn_readq_relaxed (const volatile void __iomem *addr)
 {
-       return *(volatile unsigned long *) addr;
+       return *(volatile unsigned long __force *) addr;
 }
 
 struct pci_dev;
...
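The accessor hunks above are sparse annotation fixes: __iomem marks pointers that live in I/O space, and __force marks the one deliberate cast back to a plain pointer where the dereference actually happens; both expand to nothing for the compiler. A tiny sketch that builds outside the kernel under that assumption (the fallback #defines and sketch_readl are invented for the example):

#include <stdio.h>

#ifndef __iomem
#define __iomem                 /* expands to nothing outside a sparse run */
#endif
#ifndef __force
#define __force
#endif

/* Same shape as the accessors above: the parameter carries __iomem and the
 * single deliberate cast back to a plain pointer is marked __force. */
static unsigned int sketch_readl(const volatile void __iomem *addr)
{
        return *(volatile unsigned int __force *)addr;
}

int main(void)
{
        unsigned int fake_reg = 0xdeadbeef;     /* ordinary memory standing in for MMIO */

        printf("%#x\n", sketch_readl(&fake_reg));
        return 0;
}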
@@ -58,7 +58,26 @@ void build_cpu_to_node_map(void);
                        | SD_BALANCE_EXEC       \
                        | SD_WAKE_BALANCE,      \
        .last_balance           = jiffies,      \
-       .balance_interval       = 10,           \
+       .balance_interval       = 1,            \
+       .nr_balance_failed      = 0,            \
+}
+
+/* sched_domains SD_ALLNODES_INIT for IA64 NUMA machines */
+#define SD_ALLNODES_INIT (struct sched_domain) {       \
+       .span                   = CPU_MASK_NONE,        \
+       .parent                 = NULL,                 \
+       .groups                 = NULL,                 \
+       .min_interval           = 80,                   \
+       .max_interval           = 320,                  \
+       .busy_factor            = 320,                  \
+       .imbalance_pct          = 125,                  \
+       .cache_hot_time         = (10*1000000),         \
+       .cache_nice_tries       = 1,                    \
+       .per_cpu_gain           = 100,                  \
+       .flags                  = SD_LOAD_BALANCE       \
+                               | SD_BALANCE_EXEC,      \
+       .last_balance           = jiffies,              \
+       .balance_interval       = 100*(63+num_online_cpus())/64, \
        .nr_balance_failed      = 0,            \
 }
...
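The SD_ALLNODES_INIT added above computes .balance_interval as 100*(63+num_online_cpus())/64 with integer division, so it grows slowly with CPU count and stays close to 100 on small machines. A quick standalone check of the arithmetic:

#include <stdio.h>

static int allnodes_balance_interval(int online_cpus)
{
        return 100 * (63 + online_cpus) / 64;   /* integer division, as in the macro */
}

int main(void)
{
        printf("4 cpus:   %d\n", allnodes_balance_interval(4));        /* 104 */
        printf("64 cpus:  %d\n", allnodes_balance_interval(64));       /* 198 */
        printf("512 cpus: %d\n", allnodes_balance_interval(512));      /* 898 */
        return 0;
}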