Commit 19cdce9c authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 2b738648 50155729
@@ -130,20 +130,6 @@ simscsi_info (struct Scsi_Host *host)
 	return "simulated SCSI host adapter";
 }
 
-int
-simscsi_abort (Scsi_Cmnd *cmd)
-{
-	printk ("simscsi_abort: unimplemented\n");
-	return SCSI_ABORT_SUCCESS;
-}
-
-int
-simscsi_reset (Scsi_Cmnd *cmd, unsigned int reset_flags)
-{
-	printk ("simscsi_reset: unimplemented\n");
-	return SCSI_RESET_SUCCESS;
-}
-
 int
 simscsi_biosparam (struct scsi_device *sdev, struct block_device *n,
 		sector_t capacity, int ip[])

@@ -20,21 +20,19 @@ extern int simscsi_reset (Scsi_Cmnd *, unsigned int);
 extern int simscsi_biosparam (struct scsi_device *, struct block_device *,
 			      sector_t, int[]);
 
 #define SIMSCSI { \
-	detect:			simscsi_detect, \
-	release:		simscsi_release, \
-	info:			simscsi_info, \
-	queuecommand:		simscsi_queuecommand, \
-	abort:			simscsi_abort, \
-	reset:			simscsi_reset, \
-	bios_param:		simscsi_biosparam, \
-	can_queue:		SIMSCSI_REQ_QUEUE_LEN, \
-	this_id:		-1, \
-	sg_tablesize:		SG_ALL, \
-	cmd_per_lun:		SIMSCSI_REQ_QUEUE_LEN, \
-	present:		0, \
-	unchecked_isa_dma:	0, \
-	use_clustering:		DISABLE_CLUSTERING \
+	.detect			= simscsi_detect, \
+	.release		= simscsi_release, \
+	.info			= simscsi_info, \
+	.queuecommand		= simscsi_queuecommand, \
+	.bios_param		= simscsi_biosparam, \
+	.can_queue		= SIMSCSI_REQ_QUEUE_LEN, \
+	.this_id		= -1, \
+	.sg_tablesize		= SG_ALL, \
+	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN, \
+	.present		= 0, \
+	.unchecked_isa_dma	= 0, \
+	.use_clustering		= DISABLE_CLUSTERING \
 }
 #endif /* SIMSCSI_H */

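The SIMSCSI host template above drops gcc's old `field: value` initializer extension in favor of ISO C99 designated initializers (and loses its abort/reset entries along the way). A minimal standalone sketch of the two spellings, on a hypothetical struct that is not from this patch:

	struct ops {
		int (*probe)(void);
		int id;
	};

	/* old gcc extension, being phased out of 2.5: */
	struct ops a = { probe: 0, id: -1 };

	/* ISO C99 designated initializers, the form the kernel standardized on: */
	struct ops b = { .probe = 0, .id = -1 };

Both produce identical initializations; only the C99 form is portable to non-gcc compilers.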
@@ -1245,15 +1245,15 @@ sys_call_table:
 	data8 sys_alloc_hugepages
 	data8 sys_free_hugepages		// 1235
 	data8 sys_exit_group
-	data8 ia64_ni_syscall
+	data8 sys_lookup_dcookie
 	data8 sys_io_setup
 	data8 sys_io_destroy
 	data8 sys_io_getevents			// 1240
 	data8 sys_io_submit
 	data8 sys_io_cancel
-	data8 ia64_ni_syscall
-	data8 ia64_ni_syscall
-	data8 ia64_ni_syscall			// 1245
+	data8 sys_epoll_create
+	data8 sys_epoll_ctl
+	data8 sys_epoll_wait			// 1245
 	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall

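ia64 syscall numbers start at 1024, so each `data8` slot's position in sys_call_table is simply the syscall number minus that base. A hedged sketch of the arithmetic for the slots filled in above (illustrative only, not kernel code):

	/* ia64 syscall numbers begin at 1024. */
	#define IA64_SYSCALL_BASE	1024

	/* slot = __NR_xxx - IA64_SYSCALL_BASE, e.g.:
	 *   __NR_lookup_dcookie (1237) -> slot 213
	 *   __NR_epoll_create   (1243) -> slot 219
	 *   __NR_epoll_wait     (1245) -> slot 221
	 */
	static int syscall_slot(int nr)
	{
		return nr - IA64_SYSCALL_BASE;
	}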
@@ -625,21 +625,18 @@ cpu_init (void)
 	extern char __per_cpu_end[];
 	int cpu;
 
-	if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
-		panic("Per-cpu data area too big! (%Zu > %Zu)",
-		      __per_cpu_end - __per_cpu_start, PAGE_SIZE);
-
 	/*
 	 * get_free_pages() cannot be used before cpu_init() done.  BSP allocates
 	 * "NR_CPUS" pages for all CPUs to avoid that AP calls get_zeroed_page().
 	 */
 	if (smp_processor_id() == 0) {
-		cpu_data = (unsigned long)alloc_bootmem_pages(PAGE_SIZE * NR_CPUS);
+		cpu_data = (unsigned long) __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+							   PERCPU_PAGE_SIZE,
+							   __pa(MAX_DMA_ADDRESS));
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PAGE_SIZE;
+			cpu_data += PERCPU_PAGE_SIZE;
 		}
 	}
 	cpu_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
@@ -650,7 +647,6 @@ cpu_init (void)
 	cpu_info = cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
 #ifdef CONFIG_NUMA
 	cpu_info->node_data = get_node_data_ptr();
-	cpu_info->nodeid = boot_get_local_nodeid();
 #endif
 
 	/*

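The switch from alloc_bootmem_pages() to __alloc_bootmem() lets the BSP carve out NR_CPUS contiguous PERCPU_PAGE_SIZE chunks, aligned to PERCPU_PAGE_SIZE and placed above MAX_DMA_ADDRESS; each CPU then reaches its private copy through __per_cpu_offset[]. A minimal sketch of how a per-CPU variable resolves once those offsets are set up (assuming the generic per-cpu machinery of this era; the real macro hides the cast behind RELOC_HIDE):

	/* per_cpu(var, cpu) is essentially: take the link-time address of the
	 * variable inside .data.percpu and add that CPU's relocation offset. */
	#define per_cpu_sketch(var, cpu) \
		(*(__typeof__(var) *)((char *)&(var) + __per_cpu_offset[cpu]))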
@@ -430,30 +430,39 @@ smp_build_cpu_map (void)
 
 #ifdef CONFIG_NUMA
 
-char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
+volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+
+/* which logical CPUs are on which nodes */
+volatile unsigned long node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 
 /*
- * Build cpu to node mapping.
+ * Build cpu to node mapping and initialize the per node cpu masks.
  */
 void __init
 build_cpu_to_node_map (void)
 {
-	int cpu, i;
+	int cpu, i, node;
+
+	for (node = 0; node < MAX_NUMNODES; node++)
+		node_to_cpu_mask[node] = 0;
 
 	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
 		/*
 		 * All Itanium NUMA platforms I know use ACPI, so maybe we
 		 * can drop this ifdef completely.  [EF]
 		 */
 #ifdef CONFIG_ACPI_NUMA
+		node = -1;
 		for (i = 0; i < NR_CPUS; ++i)
 			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
-				cpu_to_node_map[cpu] = node_cpuid[i].nid;
+				node = node_cpuid[i].nid;
 				break;
 			}
#else
#	error Fixme: Dunno how to build CPU-to-node map.
#endif
+		cpu_to_node_map[cpu] = node;
+		if (node >= 0)
+			node_to_cpu_mask[node] |= (1UL << cpu);
 	}
 }

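node_to_cpu_mask[] encodes node membership as one bit per logical CPU, which is why a single unsigned long caps the scheme at 64 CPUs (the new comment makes the same point for cpu_to_node_map). The invariant the loop establishes can be stated as a small check, illustrative only and not part of the patch:

	/* bit 'cpu' of node_to_cpu_mask[n] is set iff cpu_to_node_map[cpu] == n */
	static int masks_consistent(void)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			int n = cpu_to_node_map[cpu];

			if (n >= 0 && !(node_to_cpu_mask[n] & (1UL << cpu)))
				return 0;
		}
		return 1;
	}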
@@ -287,7 +287,8 @@ ia64_mmu_init (void *my_cpu_data)
 	ia64_srlz_d();
 	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), PAGE_SHIFT);
+		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
+		 PERCPU_PAGE_SHIFT);
 
 	ia64_set_psr(psr);
 	ia64_srlz_i();
@@ -455,8 +456,6 @@ mem_init (void)
 	if (num_pgt_pages > pgt_cache_water[1])
 		pgt_cache_water[1] = num_pgt_pages;
 
-	show_mem();
-
 	/* install the gate page in the global page table: */
 	put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);

@@ -137,7 +137,7 @@ SECTIONS
 	{ *(.kstrtab) }
 
   /* Per-cpu data: */
-  . = ALIGN(PAGE_SIZE);
+  . = ALIGN(PERCPU_PAGE_SIZE);
   __phys_per_cpu_start = .;
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET)
 	{
@@ -145,7 +145,7 @@ SECTIONS
 		*(.data.percpu)
 		__per_cpu_end = .;
 	}
-  . = __phys_per_cpu_start + 4096;	/* ensure percpu fits into smallest page size (4KB) */
+  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits into percpu page size */
 
   .data : AT(ADDR(.data) - PAGE_OFFSET)
 	{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }

@@ -83,6 +83,7 @@ ide_init_default_hwifs (void)
 	int index;
 
 	for (index = 0; index < MAX_HWIFS; index++) {
+		memset(&hw, 0, sizeof hw);
 		ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
 		hw.irq = ide_default_irq(ide_default_io_base(index));
 		ide_register_hw(&hw, NULL);

@@ -21,7 +21,9 @@
 # define NR_MEMBLKS	(NR_NODES * 8)
 #endif
 
-extern char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+#include <linux/cache.h>
+
+extern volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+extern volatile unsigned long node_to_cpu_mask[NR_NODES] __cacheline_aligned;
 
 /* Stuff below this line could be architecture independent */

@@ -30,6 +30,9 @@
 #define PAGE_MASK		(~(PAGE_SIZE - 1))
 #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
+#define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+
 #ifdef CONFIG_HUGETLB_PAGE
 # if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB)

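PERCPU_PAGE_SHIFT of 16 makes the per-CPU window 2^16 = 65536 bytes (64KB), large enough for one pinned translation register to map the whole area regardless of the configured base page size. This is also why the runtime panic in cpu_init() could be removed: the linker-script hunk above now bounds the section at link time. The arithmetic, spelled out as a sketch (not kernel code):

	/* 1UL << PERCPU_PAGE_SHIFT == 1UL << 16 == 65536 == 64KB;
	 * the ".= __phys_per_cpu_start + PERCPU_PAGE_SIZE" line in the
	 * linker script fails the link if .data.percpu outgrows this. */
	unsigned long percpu_window = 1UL << 16;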
@@ -202,7 +202,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
 #define RGN_KERNEL	7
 
-#define VMALLOC_START		(0xa000000000000000 + 3*PAGE_SIZE)
+#define VMALLOC_START		(0xa000000000000000 + 3*PERCPU_PAGE_SIZE)
 #define VMALLOC_VMADDR(x)	((unsigned long)(x))
 #define VMALLOC_END		(0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))

@@ -4,8 +4,8 @@
 /*
  * poll(2) bit definitions.  Chosen to be compatible with Linux/x86.
  *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #define POLLIN		0x0001
@@ -20,6 +20,7 @@
 #define POLLWRNORM	0x0100
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
+#define POLLREMOVE	0x1000
 
 struct pollfd {
 	int fd;

@@ -179,7 +179,6 @@ struct cpuinfo_ia64 {
 #endif
 #ifdef CONFIG_NUMA
 	struct ia64_node_data *node_data;
-	int nodeid;
 #endif
 };
@@ -192,10 +191,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 #define local_cpu_data		(&__get_cpu_var(cpu_info))
 #define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
 
-#ifdef CONFIG_NUMA
-#define numa_node_id()	(local_cpu_data->nodeid)
-#endif
-
 extern void identify_cpu (struct cpuinfo_ia64 *);
 extern void print_cpu_info (struct cpuinfo_ia64 *);

@@ -20,8 +20,9 @@
 #define KERNEL_START		(PAGE_OFFSET + 68*1024*1024)
 
-#define GATE_ADDR		(0xa000000000000000 + PAGE_SIZE)
-#define PERCPU_ADDR		(0xa000000000000000 + 2*PAGE_SIZE)
+/* 0xa000000000000000 - 0xa000000000000000+PERCPU_MAX_SIZE remain unmapped */
+#define PERCPU_ADDR		(0xa000000000000000 + PERCPU_PAGE_SIZE)
+#define GATE_ADDR		(0xa000000000000000 + 2*PERCPU_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__

@@ -15,12 +15,22 @@
 #include <asm/acpi.h>
 #include <asm/numa.h>
+#include <asm/smp.h>
 
-/* Returns the number of the node containing CPU 'cpu' */
 #ifdef CONFIG_NUMA
-#define __cpu_to_node(cpu) cpu_to_node_map[cpu]
+/*
+ * Returns the number of the node containing CPU 'cpu'
+ */
+#define __cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+#define __node_to_cpu_mask(node) (node_to_cpu_mask[node])
+
 #else
 #define __cpu_to_node(cpu) (0)
+#define __node_to_cpu_mask(node) (phys_cpu_present_map)
 #endif
 
 /*
@@ -41,34 +51,8 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
- * Slow in the current implementation.
- * Who needs this?
  */
-/* #define __node_to_first_cpu(node) pool_cpus[pool_ptr[node]] */
-static inline int __node_to_first_cpu(int node)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (__cpu_to_node(i) == node)
-			return i;
-	BUG(); /* couldn't find a cpu on given node */
-	return -1;
-}
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- */
-static inline unsigned long __node_to_cpu_mask(int node)
-{
-	int cpu;
-	unsigned long mask = 0UL;
-
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (__cpu_to_node(cpu) == node)
-			mask |= 1UL << cpu;
-	return mask;
-}
+#define __node_to_first_cpu(node) (__ffs(__node_to_cpu_mask(node)))
 
 /*
  * Returns the number of the first MemBlk on Node 'node'

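With node_to_cpu_mask[] maintained by build_cpu_to_node_map(), finding the first CPU on a node shrinks from the deleted O(NR_CPUS) scan to a find-first-set-bit on the mask, which is what the new __node_to_first_cpu() does via __ffs(). A worked example under the same bit-per-CPU encoding (hypothetical mask values):

	/* If CPUs 4 and 5 live on node 1, node_to_cpu_mask[1] == 0x30,
	 * and __ffs(0x30) == 4, so __node_to_first_cpu(1) == 4.
	 * A portable sketch of __ffs(); assumes mask != 0, like the real one. */
	static inline unsigned long ffs_sketch(unsigned long mask)
	{
		unsigned long bit = 0;

		while (!(mask & 1UL)) {
			mask >>= 1;
			bit++;
		}
		return bit;
	}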
@@ -226,12 +226,15 @@
 #define __NR_alloc_hugepages		1234
 #define __NR_free_hugepages		1235
 #define __NR_exit_group			1236
-/* 1237 currently unused */
+#define __NR_lookup_dcookie		1237
 #define __NR_io_setup			1238
 #define __NR_io_destroy			1239
 #define __NR_io_getevents		1240
 #define __NR_io_submit			1241
 #define __NR_io_cancel			1242
+#define __NR_epoll_create		1243
+#define __NR_epoll_ctl			1244
+#define __NR_epoll_wait			1245
 
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)

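Until libc grows wrappers for these entry points, user space can reach them through syscall(2) with the raw numbers above. A hedged user-space sketch, assuming an ia64 libc of this era that provides syscall() but no epoll wrappers:

	#include <unistd.h>		/* syscall() */

	#ifndef __NR_epoll_create
	#define __NR_epoll_create	1243	/* ia64 number from the hunk above */
	#endif

	/* Raw invocation of the new syscall; 'size' is the usual hint that
	 * early sys_epoll_create() took for sizing its internal tables. */
	int epoll_create_raw(int size)
	{
		return syscall(__NR_epoll_create, size);
	}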