Commit 2387ad63 authored by David Mosberger

ia64: Sync up with 2.5.35+. Add ia64-specific huge page support (by Rohit Seth).

	Support new clone2() functionality (CLONE_SETTLS, CLONE_SETTID, & CLONE_CLEARTID).
parent 981d21bb
...@@ -96,8 +96,6 @@ MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot ...@@ -96,8 +96,6 @@ MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
vmlinux: arch/$(ARCH)/vmlinux.lds.s vmlinux: arch/$(ARCH)/vmlinux.lds.s
CPPFLAGS_arch/$(ARCH)/vmlinux.lds.s
compressed: vmlinux compressed: vmlinux
$(OBJCOPY) $(OBJCOPYFLAGS) vmlinux vmlinux-tmp $(OBJCOPY) $(OBJCOPYFLAGS) vmlinux vmlinux-tmp
gzip vmlinux-tmp gzip vmlinux-tmp
......
...@@ -86,6 +86,31 @@ fi ...@@ -86,6 +86,31 @@ fi
define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore. define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore.
define_int CONFIG_FORCE_MAX_ZONEORDER 18
bool 'IA-64 Huge TLB Page Support' CONFIG_HUGETLB_PAGE
if [ "$CONFIG_HUGETLB_PAGE" = "y" ]; then
if [ "$CONFIG_MCKINLEY" = "y" ]; then
choice ' IA-64 Huge TLB Page Size' \
"4GB CONFIG_HUGETLB_PAGE_SIZE_4GB \
256MB CONFIG_HUGETLB_PAGE_SIZE_256MB \
64MB CONFIG_HUGETLB_PAGE_SIZE_64MB \
16MB CONFIG_HUGETLB_PAGE_SIZE_16MB \
4MB CONFIG_HUGETLB_PAGE_SIZE_4MB \
1MB CONFIG_HUGETLB_PAGE_SIZE_1MB \
256KB CONFIG_HUGETLB_PAGE_SIZE_256KB" 16MB
else
choice ' IA-64 Huge TLB Page Size' \
"256MB CONFIG_HUGETLB_PAGE_SIZE_256MB \
64MB CONFIG_HUGETLB_PAGE_SIZE_64MB \
16MB CONFIG_HUGETLB_PAGE_SIZE_16MB \
4MB CONFIG_HUGETLB_PAGE_SIZE_4MB \
1MB CONFIG_HUGETLB_PAGE_SIZE_1MB \
256KB CONFIG_HUGETLB_PAGE_SIZE_256KB" 16MB
fi
fi
bool 'SMP support' CONFIG_SMP bool 'SMP support' CONFIG_SMP
bool 'Support running of Linux/x86 binaries' CONFIG_IA32_SUPPORT bool 'Support running of Linux/x86 binaries' CONFIG_IA32_SUPPORT
bool 'Performance monitor support' CONFIG_PERFMON bool 'Performance monitor support' CONFIG_PERFMON
......
...@@ -39,9 +39,7 @@ ...@@ -39,9 +39,7 @@
#define DRIVER_NAME "SBA" #define DRIVER_NAME "SBA"
#ifndef CONFIG_IA64_HP_PROTO
#define ALLOW_IOV_BYPASS #define ALLOW_IOV_BYPASS
#endif
#define ENABLE_MARK_CLEAN #define ENABLE_MARK_CLEAN
/* /*
** The number of debug flags is a clue - this code is fragile. ** The number of debug flags is a clue - this code is fragile.
...@@ -1252,10 +1250,6 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num) ...@@ -1252,10 +1250,6 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
** Firmware programs the maximum IOV space size into the imask reg ** Firmware programs the maximum IOV space size into the imask reg
*/ */
iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
#ifdef CONFIG_IA64_HP_PROTO
if (!iova_space_size)
iova_space_size = GB(1);
#endif
/* /*
** iov_order is always based on a 1GB IOVA space since we want to ** iov_order is always based on a 1GB IOVA space since we want to
...@@ -1625,10 +1619,8 @@ void __init sba_init(void) ...@@ -1625,10 +1619,8 @@ void __init sba_init(void)
device->slot_name, hpa); device->slot_name, hpa);
if ((hw_rev & 0xFF) < 0x20) { if ((hw_rev & 0xFF) < 0x20) {
printk(KERN_INFO "%s WARNING rev 2.0 or greater will be required for IO MMU support in the future\n", DRIVER_NAME); printk("%s: SBA rev less than 2.0 not supported", DRIVER_NAME);
#ifndef CONFIG_IA64_HP_PROTO return;
panic("%s: CONFIG_IA64_HP_PROTO MUST be enabled to support SBA rev less than 2.0", DRIVER_NAME);
#endif
} }
sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL); sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
......
...@@ -33,60 +33,73 @@ struct fake_pci_dev { ...@@ -33,60 +33,73 @@ struct fake_pci_dev {
static struct pci_ops *orig_pci_ops; static struct pci_ops *orig_pci_ops;
#define HP_CFG_RD(sz, bits, name) \ struct fake_pci_dev *
static int hp_cfg_read##sz (struct pci_dev *dev, int where, u##bits *value) \ lookup_fake_dev (struct pci_bus *bus, unsigned int devfn)
{ \ {
struct fake_pci_dev *fake_dev; \ struct pci_dev *dev;
if (!(fake_dev = (struct fake_pci_dev *) dev->sysdata)) \ list_for_each_entry(dev, &bus->devices, bus_list)
return orig_pci_ops->name(dev, where, value); \ if (dev->devfn == devfn)
\ return (struct fake_pci_dev *) dev->sysdata;
if (where == PCI_BASE_ADDRESS_0) { \ return NULL;
if (fake_dev->sizing) \
*value = ~(fake_dev->csr_size - 1); \
else \
*value = (fake_dev->csr_base & \
PCI_BASE_ADDRESS_MEM_MASK) | \
PCI_BASE_ADDRESS_SPACE_MEMORY; \
fake_dev->sizing = 0; \
return PCIBIOS_SUCCESSFUL; \
} \
*value = read##sz(fake_dev->mapped_csrs + where); \
if (where == PCI_COMMAND) \
*value |= PCI_COMMAND_MEMORY; /* SBA omits this */ \
return PCIBIOS_SUCCESSFUL; \
} }
#define HP_CFG_WR(sz, bits, name) \ static int
static int hp_cfg_write##sz (struct pci_dev *dev, int where, u##bits value) \ hp_cfg_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{ \ {
struct fake_pci_dev *fake_dev; \ struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn);
\
if (!(fake_dev = (struct fake_pci_dev *) dev->sysdata)) \ if (!fake_dev)
return orig_pci_ops->name(dev, where, value); \ return (*orig_pci_ops->read)(bus, devfn, where, size, value);
\
if (where == PCI_BASE_ADDRESS_0) { \ if (where == PCI_BASE_ADDRESS_0) {
if (value == (u##bits) ~0) \ if (fake_dev->sizing)
fake_dev->sizing = 1; \ *value = ~(fake_dev->csr_size - 1);
return PCIBIOS_SUCCESSFUL; \ else
} else \ *value = ((fake_dev->csr_base & PCI_BASE_ADDRESS_MEM_MASK)
write##sz(value, fake_dev->mapped_csrs + where); \ | PCI_BASE_ADDRESS_SPACE_MEMORY);
return PCIBIOS_SUCCESSFUL; \ fake_dev->sizing = 0;
return PCIBIOS_SUCCESSFUL;
}
switch (size) {
case 1: *value = readb(fake_dev->mapped_csrs + where); break;
case 2: *value = readw(fake_dev->mapped_csrs + where); break;
case 4: *value = readl(fake_dev->mapped_csrs + where); break;
default:
printk(KERN_WARNING"hp_cfg_read: bad size = %d bytes", size);
break;
}
if (where == PCI_COMMAND)
*value |= PCI_COMMAND_MEMORY; /* SBA omits this */
return PCIBIOS_SUCCESSFUL;
} }
HP_CFG_RD(b, 8, read_byte) static int
HP_CFG_RD(w, 16, read_word) hp_cfg_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
HP_CFG_RD(l, 32, read_dword) {
HP_CFG_WR(b, 8, write_byte) struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn);
HP_CFG_WR(w, 16, write_word)
HP_CFG_WR(l, 32, write_dword) if (!fake_dev)
return (*orig_pci_ops->write)(bus, devfn, where, size, value);
if (where == PCI_BASE_ADDRESS_0) {
if (value == ((1UL << 8*size) - 1))
fake_dev->sizing = 1;
return PCIBIOS_SUCCESSFUL;
}
switch (size) {
case 1: writeb(value, fake_dev->mapped_csrs + where); break;
case 2: writew(value, fake_dev->mapped_csrs + where); break;
case 4: writel(value, fake_dev->mapped_csrs + where); break;
default:
printk(KERN_WARNING"hp_cfg_write: bad size = %d bytes", size);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops hp_pci_conf = { static struct pci_ops hp_pci_conf = {
hp_cfg_readb, .read = hp_cfg_read,
hp_cfg_readw, .write = hp_cfg_write
hp_cfg_readl,
hp_cfg_writeb,
hp_cfg_writew,
hp_cfg_writel,
}; };
static void static void
...@@ -309,40 +322,8 @@ hpzx1_acpi_dev_init(void) ...@@ -309,40 +322,8 @@ hpzx1_acpi_dev_init(void)
* HWP0003: AGP LBA device * HWP0003: AGP LBA device
*/ */
acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL); acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL);
#ifdef CONFIG_IA64_HP_PROTO
if (hpzx1_devices) {
#endif
acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002 PCI LBA", NULL); acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002 PCI LBA", NULL);
acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003 AGP LBA", NULL); acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003 AGP LBA", NULL);
#ifdef CONFIG_IA64_HP_PROTO
}
#define ZX1_FUNC_ID_VALUE (PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP
/*
* Early protos don't have bridges in the ACPI namespace, so
* if we didn't find anything, add the things we know are
* there.
*/
if (hpzx1_devices == 0) {
u64 hpa, csr_base;
csr_base = 0xfed00000UL;
hpa = (u64) ioremap(csr_base, 0x2000);
if (__raw_readl(hpa) == ZX1_FUNC_ID_VALUE) {
hpzx1_fake_pci_dev("HWP0001 SBA", 0, csr_base, 0x1000);
hpzx1_fake_pci_dev("HWP0001 IOC", 0, csr_base + 0x1000,
0x1000);
csr_base = 0xfed24000UL;
iounmap(hpa);
hpa = (u64) ioremap(csr_base, 0x1000);
hpzx1_fake_pci_dev("HWP0003 AGP LBA", 0x40, csr_base,
0x1000);
}
iounmap(hpa);
}
#endif
} }
extern void sba_init(void); extern void sba_init(void);
......
...@@ -33,6 +33,15 @@ ...@@ -33,6 +33,15 @@
#define EFI_DEBUG 0 #define EFI_DEBUG 0
#ifdef CONFIG_HUGETLB_PAGE
/* By default, a total of 512MB is reserved for huge pages. */
#define HTLBZONE_SIZE_DEFAULT 0x20000000
unsigned long htlbzone_pages = (HTLBZONE_SIZE_DEFAULT >> HPAGE_SHIFT);
#endif
extern efi_status_t efi_call_phys (void *, ...); extern efi_status_t efi_call_phys (void *, ...);
struct efi efi; struct efi efi;
...@@ -399,6 +408,25 @@ efi_init (void) ...@@ -399,6 +408,25 @@ efi_init (void)
++cp; ++cp;
} }
} }
#ifdef CONFIG_HUGETLB_PAGE
/* Parse "lpmem=" the same way "mem=" is handled above. */
for (cp = saved_command_line; *cp; ) {
if (memcmp(cp, "lpmem=", 8) == 0) {
cp += 8;
htlbzone_pages = memparse(cp, &end);
htlbzone_pages = (htlbzone_pages >> HPAGE_SHIFT);
if (end != cp)
break;
cp = end;
} else {
while (*cp != ' ' && *cp)
++cp;
while (*cp == ' ')
++cp;
}
}
printk("Total HugeTLB_Page memory pages requested 0x%lx \n", htlbzone_pages);
#endif
if (mem_limit != ~0UL) if (mem_limit != ~0UL)
printk("Ignoring memory above %luMB\n", mem_limit >> 20); printk("Ignoring memory above %luMB\n", mem_limit >> 20);
......
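For context, the hunk above sizes the huge-page pool from the boot line: memparse() yields a byte count that is then shifted down by HPAGE_SHIFT. A minimal sketch of that arithmetic, assuming the default 16MB huge pages (HPAGE_SHIFT == 24); the parameter name and the 512MB default come from the hunk itself:

```c
#include <stdio.h>

/* Assumes CONFIG_HUGETLB_PAGE_SIZE_16MB, i.e. HPAGE_SHIFT == 24. */
#define HPAGE_SHIFT 24

int main(void)
{
	unsigned long deflt = 0x20000000UL;	/* HTLBZONE_SIZE_DEFAULT: 512MB */
	unsigned long lpmem = 1024UL << 20;	/* e.g. booting with "lpmem=1024M" */

	printf("default:     %lu huge pages\n", deflt >> HPAGE_SHIFT);	/* 32 */
	printf("lpmem=1024M: %lu huge pages\n", lpmem >> HPAGE_SHIFT);	/* 64 */
	return 0;
}
```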
...@@ -90,15 +90,23 @@ ENTRY(ia64_execve) ...@@ -90,15 +90,23 @@ ENTRY(ia64_execve)
br.ret.sptk.many rp br.ret.sptk.many rp
END(ia64_execve) END(ia64_execve)
/*
* sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 user_tid, u64 tls)
*/
GLOBAL_ENTRY(sys_clone2) GLOBAL_ENTRY(sys_clone2)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc r16=ar.pfs,3,2,4,0
alloc r16=ar.pfs,5,2,5,0
DO_SAVE_SWITCH_STACK DO_SAVE_SWITCH_STACK
adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
mov loc0=rp mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork mov loc1=r16 // save ar.pfs across do_fork
.body .body
mov out1=in1 mov out1=in1
mov out3=in2 mov out3=in2
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in3 // valid only w/CLONE_SETTID and/or CLONE_CLEARTID
;;
(p6) st8 [r2]=in4 // store TLS in r13 (tp)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
br.call.sptk.many rp=do_fork br.call.sptk.many rp=do_fork
...@@ -115,15 +123,24 @@ GLOBAL_ENTRY(sys_clone2) ...@@ -115,15 +123,24 @@ GLOBAL_ENTRY(sys_clone2)
br.ret.sptk.many rp br.ret.sptk.many rp
END(sys_clone2) END(sys_clone2)
/*
* sys_clone(u64 flags, u64 ustack_base, u64 user_tid, u64 tls)
* Deprecated. Use sys_clone2() instead.
*/
GLOBAL_ENTRY(sys_clone) GLOBAL_ENTRY(sys_clone)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc r16=ar.pfs,2,2,4,0
alloc r16=ar.pfs,4,2,5,0
DO_SAVE_SWITCH_STACK DO_SAVE_SWITCH_STACK
adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
mov loc0=rp mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork mov loc1=r16 // save ar.pfs across do_fork
.body .body
mov out1=in1 mov out1=in1
mov out3=16 // stacksize (compensates for 16-byte scratch area) mov out3=16 // stacksize (compensates for 16-byte scratch area)
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in2 // out4 = user_tid (optional)
;;
(p6) st8 [r2]=in3 // store TLS in r13 (tp)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
br.call.sptk.many rp=do_fork br.call.sptk.many rp=do_fork
...@@ -521,7 +538,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone) ...@@ -521,7 +538,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/* /*
* We need to call schedule_tail() to complete the scheduling process. * We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task. * address of the previously executing task.
*/ */
br.call.sptk.many rp=ia64_invoke_schedule_tail br.call.sptk.many rp=ia64_invoke_schedule_tail
...@@ -872,7 +889,7 @@ END(invoke_schedule) ...@@ -872,7 +889,7 @@ END(invoke_schedule)
#endif /* __GNUC__ < 3 */ #endif /* __GNUC__ < 3 */
/* /*
* Setup stack and call ia64_do_signal. Note that pSys and pNonSys need to * Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to
* be set up by the caller. We declare 8 input registers so the system call * be set up by the caller. We declare 8 input registers so the system call
* args get preserved, in case we need to restart a system call. * args get preserved, in case we need to restart a system call.
*/ */
...@@ -900,7 +917,7 @@ ENTRY(notify_resume_user) ...@@ -900,7 +917,7 @@ ENTRY(notify_resume_user)
mov ar.unat=r9 mov ar.unat=r9
mov ar.pfs=loc1 mov ar.pfs=loc1
br.ret.sptk.many rp br.ret.sptk.many rp
END(do_notify_resume_user)
END(notify_resume_user)
GLOBAL_ENTRY(sys_rt_sigsuspend) GLOBAL_ENTRY(sys_rt_sigsuspend)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
...@@ -1224,10 +1241,10 @@ sys_call_table: ...@@ -1224,10 +1241,10 @@ sys_call_table:
data8 sys_futex // 1230 data8 sys_futex // 1230
data8 sys_sched_setaffinity data8 sys_sched_setaffinity
data8 sys_sched_getaffinity data8 sys_sched_getaffinity
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall // 1235
data8 ia64_ni_syscall
data8 sys_security
data8 sys_alloc_hugepages
data8 sys_free_hugepages // 1235
data8 sys_exit_group
data8 ia64_ni_syscall data8 ia64_ni_syscall
data8 sys_io_setup data8 sys_io_setup
data8 sys_io_destroy data8 sys_io_destroy
......
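The clone2 changes above add a fifth argument (tls) and pass user_tid through to do_fork(); when CLONE_SETTLS is set, the TLS value is written into the child's saved r13 (tp) slot. A hedged userspace sketch of invoking the raw syscall: the CLONE_SETTLS value follows from the CLONE_SETTLS_BIT 19 added later in this patch, __NR_clone2 comes from the ia64 <asm/unistd.h>, and everything about the child's behaviour here is an illustrative assumption rather than part of the commit:

```c
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>

#define STACK_SIZE	(256 * 1024)
#define CLONE_SETTLS	0x00080000	/* bit 19, per CLONE_SETTLS_BIT below */

static unsigned long tls_block[64];	/* the child's r13 (tp) will point here */

int main(void)
{
	void *stack = malloc(STACK_SIZE);	/* backing store grows up, memory stack down */
	long pid;

	/* sys_clone2(flags, ustack_base, ustack_size, user_tid, tls) */
	pid = syscall(__NR_clone2, SIGCHLD | CLONE_SETTLS,
		      stack, STACK_SIZE, 0UL, tls_block);
	if (pid == 0)
		_exit(0);	/* child: must not rely on the parent's stack frame */
	if (pid > 0)
		waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}
```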
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
static struct fs_struct init_fs = INIT_FS; static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES; static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
struct mm_struct init_mm = INIT_MM(init_mm); struct mm_struct init_mm = INIT_MM(init_mm);
/* /*
......
...@@ -70,24 +70,6 @@ ...@@ -70,24 +70,6 @@
mov r19=n;; /* prepare to save predicates */ \ mov r19=n;; /* prepare to save predicates */ \
br.sptk.many dispatch_to_fault_handler br.sptk.many dispatch_to_fault_handler
/*
* As we don't (hopefully) use the space available, we need to fill it with
* nops. the parameter may be used for debugging and is representing the entry
* number
*/
#define BREAK_BUNDLE(a) break.m (a); \
break.i (a); \
break.i (a)
/*
* 4 breaks bundles all together
*/
#define BREAK_BUNDLE4(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a)
/*
* 8 bundles all together (too lazy to use only 4 at a time !)
*/
#define BREAK_BUNDLE8(a); BREAK_BUNDLE4(a); BREAK_BUNDLE4(a)
.section .text.ivt,"ax" .section .text.ivt,"ax"
.align 32768 // align on 32KB boundary .align 32768 // align on 32KB boundary
...@@ -115,6 +97,10 @@ ENTRY(vhpt_miss) ...@@ -115,6 +97,10 @@ ENTRY(vhpt_miss)
* - the faulting virtual address has no L1, L2, or L3 mapping * - the faulting virtual address has no L1, L2, or L3 mapping
*/ */
mov r16=cr.ifa // get address that caused the TLB miss mov r16=cr.ifa // get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
movl r18=PAGE_SHIFT
mov r25=cr.itir
#endif
;; ;;
rsm psr.dt // use physical addressing for data rsm psr.dt // use physical addressing for data
mov r31=pr // save the predicate registers mov r31=pr // save the predicate registers
...@@ -122,8 +108,18 @@ ENTRY(vhpt_miss) ...@@ -122,8 +108,18 @@ ENTRY(vhpt_miss)
shl r21=r16,3 // shift bit 60 into sign bit shl r21=r16,3 // shift bit 60 into sign bit
shr.u r17=r16,61 // get the region number into r17 shr.u r17=r16,61 // get the region number into r17
;; ;;
shr r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
extr.u r26=r25,2,6
;;
cmp.eq p8,p0=HPAGE_SHIFT,r26
;;
(p8) dep r25=r18,r25,2,6
(p8) shr r22=r22,HPAGE_SHIFT-PAGE_SHIFT
;;
#endif
cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5? cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5?
shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of the faulting address
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;; ;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
srlz.d // ensure "rsm psr.dt" has taken effect srlz.d // ensure "rsm psr.dt" has taken effect
...@@ -134,7 +130,7 @@ ENTRY(vhpt_miss) ...@@ -134,7 +130,7 @@ ENTRY(vhpt_miss)
(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8) (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
cmp.eq p7,p6=0,r21 // unused address bits all zeroes? cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
shr.u r18=r16,PMD_SHIFT // shift L2 index into position
shr.u r18=r22,PMD_SHIFT // shift L2 index into position
;; ;;
ld8 r17=[r17] // fetch the L1 entry (may be 0) ld8 r17=[r17] // fetch the L1 entry (may be 0)
;; ;;
...@@ -142,7 +138,7 @@ ENTRY(vhpt_miss) ...@@ -142,7 +138,7 @@ ENTRY(vhpt_miss)
dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
;; ;;
(p7) ld8 r20=[r17] // fetch the L2 entry (may be 0) (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
;; ;;
(p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL? (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
...@@ -161,6 +157,10 @@ ENTRY(vhpt_miss) ...@@ -161,6 +157,10 @@ ENTRY(vhpt_miss)
(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
mov cr.ifa=r22 mov cr.ifa=r22
#ifdef CONFIG_HUGETLB_PAGE
(p8) mov cr.itir=r25 // change to default page-size for VHPT
#endif
/* /*
* Now compute and insert the TLB entry for the virtual page table. We never * Now compute and insert the TLB entry for the virtual page table. We never
* execute in a page table page so there is no need to set the exception deferral * execute in a page table page so there is no need to set the exception deferral
......
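Restated in C (a sketch of the intent only; the handler itself works on r16/r22/r25 as shown above): when the faulting region's translation parameters indicate the huge page size, the offset is scaled down so the ordinary PAGE_SIZE-based three-level walk finds the right entries, and cr.itir is rewritten so the VHPT entry is inserted with the normal page size. The PAGE_SHIFT/HPAGE_SHIFT values below are assumptions for illustration:

```c
#define PAGE_SHIFT	14	/* assumption: 16KB base pages */
#define HPAGE_SHIFT	24	/* assumption: default 16MB huge pages */

/* Returns the address the three-level walk should use, and updates *itir so
 * the VHPT translation is inserted with the normal page size. */
static unsigned long vhpt_miss_scale(unsigned long ifa, unsigned long *itir)
{
	unsigned long addr = ifa;

	if (((*itir >> 2) & 0x3f) == HPAGE_SHIFT) {		/* huge-page region? */
		addr >>= HPAGE_SHIFT - PAGE_SHIFT;		/* shrink the offset */
		*itir = (*itir & ~(0x3fUL << 2)) | (PAGE_SHIFT << 2);
	}
	return addr;	/* ...continue with the pgd/pmd/pte lookup using this */
}
```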
...@@ -2,6 +2,11 @@ ...@@ -2,6 +2,11 @@
* pci.c - Low-Level PCI Access in IA-64 * pci.c - Low-Level PCI Access in IA-64
* *
* Derived from bios32.c of i386 tree. * Derived from bios32.c of i386 tree.
*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Note: Above list of copyright holders is incomplete...
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -85,15 +90,15 @@ __pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value) ...@@ -85,15 +90,15 @@ __pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
static int static int
pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{ {
return __pci_sal_read(0, bus->number, PCI_SLOT(devfn), return __pci_sal_read(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
PCI_FUNC(devfn), where, size, value); where, size, value);
} }
static int static int
pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{ {
return __pci_sal_write(0, bus->number, PCI_SLOT(devfn), return __pci_sal_write(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
PCI_FUNC(devfn), where, size, value); where, size, value);
} }
struct pci_ops pci_sal_ops = { struct pci_ops pci_sal_ops = {
...@@ -202,8 +207,8 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r ...@@ -202,8 +207,8 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
{ {
} }
int static inline int
pcibios_enable_device (struct pci_dev *dev) pcibios_enable_resources (struct pci_dev *dev, int mask)
{ {
u16 cmd, old_cmd; u16 cmd, old_cmd;
int idx; int idx;
...@@ -215,6 +220,10 @@ pcibios_enable_device (struct pci_dev *dev) ...@@ -215,6 +220,10 @@ pcibios_enable_device (struct pci_dev *dev)
pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd; old_cmd = cmd;
for (idx=0; idx<6; idx++) { for (idx=0; idx<6; idx++) {
/* Only set up the desired resources. */
if (!(mask & (1 << idx)))
continue;
r = &dev->resource[idx]; r = &dev->resource[idx];
if (!r->start && r->end) { if (!r->start && r->end) {
printk(KERN_ERR printk(KERN_ERR
...@@ -233,9 +242,19 @@ pcibios_enable_device (struct pci_dev *dev) ...@@ -233,9 +242,19 @@ pcibios_enable_device (struct pci_dev *dev)
printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd); printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd);
} }
return 0;
}
printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name); int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
int ret;
ret = pcibios_enable_resources(dev, mask);
if (ret < 0)
return ret;
printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
return 0; return 0;
} }
......
...@@ -1742,14 +1742,14 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, ...@@ -1742,14 +1742,14 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
static void static void
pfm_tasklist_toggle_pp(unsigned int val) pfm_tasklist_toggle_pp(unsigned int val)
{ {
struct task_struct *p; struct task_struct *g, *p;
struct pt_regs *regs; struct pt_regs *regs;
DBprintk(("invoked by [%d] pp=%u\n", current->pid, val)); DBprintk(("invoked by [%d] pp=%u\n", current->pid, val));
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
for_each_task(p) { do_each_thread(g, p) {
regs = (struct pt_regs *)((unsigned long) p + IA64_STK_OFFSET); regs = (struct pt_regs *)((unsigned long) p + IA64_STK_OFFSET);
/* /*
...@@ -1761,7 +1761,8 @@ pfm_tasklist_toggle_pp(unsigned int val) ...@@ -1761,7 +1761,8 @@ pfm_tasklist_toggle_pp(unsigned int val)
* update psr.pp * update psr.pp
*/ */
ia64_psr(regs)->pp = val; ia64_psr(regs)->pp = val;
} } while_each_thread(g, p);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
#endif #endif
...@@ -4039,14 +4040,14 @@ pfm_cleanup_smpl_buf(struct task_struct *task) ...@@ -4039,14 +4040,14 @@ pfm_cleanup_smpl_buf(struct task_struct *task)
void void
pfm_cleanup_owners(struct task_struct *task) pfm_cleanup_owners(struct task_struct *task)
{ {
struct task_struct *p; struct task_struct *g, *p;
pfm_context_t *ctx; pfm_context_t *ctx;
DBprintk(("called by [%d] for [%d]\n", current->pid, task->pid)); DBprintk(("called by [%d] for [%d]\n", current->pid, task->pid));
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
for_each_task(p) { do_each_thread(g, p) {
/* /*
* It is safe to do the 2-step test here, because thread.ctx * It is safe to do the 2-step test here, because thread.ctx
* is cleaned up only in release_thread() and at that point * is cleaned up only in release_thread() and at that point
...@@ -4084,7 +4085,8 @@ pfm_cleanup_owners(struct task_struct *task) ...@@ -4084,7 +4085,8 @@ pfm_cleanup_owners(struct task_struct *task)
DBprintk(("done for notifier [%d] in [%d]\n", task->pid, p->pid)); DBprintk(("done for notifier [%d] in [%d]\n", task->pid, p->pid));
} }
} } while_each_thread(g, p);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
atomic_set(&task->thread.pfm_owners_check, 0); atomic_set(&task->thread.pfm_owners_check, 0);
...@@ -4098,23 +4100,21 @@ pfm_cleanup_owners(struct task_struct *task) ...@@ -4098,23 +4100,21 @@ pfm_cleanup_owners(struct task_struct *task)
void void
pfm_cleanup_notifiers(struct task_struct *task) pfm_cleanup_notifiers(struct task_struct *task)
{ {
struct task_struct *p; struct task_struct *g, *p;
pfm_context_t *ctx; pfm_context_t *ctx;
DBprintk(("called by [%d] for [%d]\n", current->pid, task->pid)); DBprintk(("called by [%d] for [%d]\n", current->pid, task->pid));
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
for_each_task(p) { do_each_thread(g, p) {
/* /*
* It is safe to do the 2-step test here, because thread.ctx * It is safe to do the 2-step test here, because thread.ctx is cleaned up
* is cleaned up only in release_thread() and at that point * only in release_thread() and at that point the task has been detached
* the task has been detached from the tasklist which is an * from the tasklist which is an operation which uses the write_lock() on
* operation which uses the write_lock() on the tasklist_lock * the tasklist_lock so it cannot run concurrently to this loop. So we
* so it cannot run concurrently to this loop. So we have the * have the guarantee that if we find p and it has a perfmon ctx then it
* guarantee that if we find p and it has a perfmon ctx then * is going to stay like this for the entire execution of this loop.
* it is going to stay like this for the entire execution of this
* loop.
*/ */
ctx = p->thread.pfm_context; ctx = p->thread.pfm_context;
...@@ -4123,16 +4123,16 @@ pfm_cleanup_notifiers(struct task_struct *task) ...@@ -4123,16 +4123,16 @@ pfm_cleanup_notifiers(struct task_struct *task)
if (ctx && ctx->ctx_notify_task == task) { if (ctx && ctx->ctx_notify_task == task) {
DBprintk(("trying for notifier [%d] in [%d]\n", task->pid, p->pid)); DBprintk(("trying for notifier [%d] in [%d]\n", task->pid, p->pid));
/* /*
* the spinlock is required to take care of a race condition * the spinlock is required to take care of a race condition with
* with the send_sig_info() call. We must make sure that * the send_sig_info() call. We must make sure that either the
* either the send_sig_info() completes using a valid task, * send_sig_info() completes using a valid task, or the
* or the notify_task is cleared before the send_sig_info() * notify_task is cleared before the send_sig_info() can pick up a
* can pick up a stale value. Note that by the time this * stale value. Note that by the time this function is executed
* function is executed the 'task' is already detached from the * the 'task' is already detached from the tasklist. The problem
* tasklist. The problem is that the notifiers have a direct * is that the notifiers have a direct pointer to it. It is okay
* pointer to it. It is okay to send a signal to a task in this * to send a signal to a task in this stage, it simply will have
* stage, it simply will have no effect. But it is better than sending * no effect. But it is better than sending to a completely
* to a completely destroyed task or worse to a new task using the same * destroyed task or worse to a new task using the same
* task_struct address. * task_struct address.
*/ */
LOCK_CTX(ctx); LOCK_CTX(ctx);
...@@ -4143,7 +4143,8 @@ pfm_cleanup_notifiers(struct task_struct *task) ...@@ -4143,7 +4143,8 @@ pfm_cleanup_notifiers(struct task_struct *task)
DBprintk(("done for notifier [%d] in [%d]\n", task->pid, p->pid)); DBprintk(("done for notifier [%d] in [%d]\n", task->pid, p->pid));
} }
} } while_each_thread(g, p);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
atomic_set(&task->thread.pfm_notifiers_check, 0); atomic_set(&task->thread.pfm_notifiers_check, 0);
......
...@@ -80,6 +80,12 @@ show_stack (struct task_struct *task) ...@@ -80,6 +80,12 @@ show_stack (struct task_struct *task)
} }
} }
void
dump_stack (void)
{
show_stack(NULL);
}
void void
show_regs (struct pt_regs *regs) show_regs (struct pt_regs *regs)
{ {
...@@ -248,18 +254,15 @@ ia64_load_extra (struct task_struct *task) ...@@ -248,18 +254,15 @@ ia64_load_extra (struct task_struct *task)
* | | <-- sp (lowest addr) * | | <-- sp (lowest addr)
* +---------------------+ * +---------------------+
* *
* Note: if we get called through kernel_thread() then the memory * Note: if we get called through kernel_thread() then the memory above "(highest addr)"
* above "(highest addr)" is valid kernel stack memory that needs to * is valid kernel stack memory that needs to be copied as well.
* be copied as well.
* *
* Observe that we copy the unat values that are in pt_regs and * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an
* switch_stack. Spilling an integer to address X causes bit N in * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
* ar.unat to be set to the NaT bit of the register, with N=(X & * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the
* 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since
* if the pt_regs structure in the parent is congruent to that of the * the stack is page aligned and the page size is at least 4KB, this is always the case,
* child, modulo 512. Since the stack is page aligned and the page * so there is nothing to worry about.
* size is at least 4KB, this is always the case, so there is nothing
* to worry about.
*/ */
int int
copy_thread (int nr, unsigned long clone_flags, copy_thread (int nr, unsigned long clone_flags,
...@@ -300,6 +303,8 @@ copy_thread (int nr, unsigned long clone_flags, ...@@ -300,6 +303,8 @@ copy_thread (int nr, unsigned long clone_flags,
memcpy((void *) child_rbs, (void *) rbs, rbs_size); memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (user_mode(child_ptregs)) { if (user_mode(child_ptregs)) {
if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) { if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16; child_ptregs->r12 = user_stack_base + user_stack_size - 16;
child_ptregs->ar_bspstore = user_stack_base; child_ptregs->ar_bspstore = user_stack_base;
......
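The congruence argument in the comment above is easy to check numerically: spilling to address X records the NaT bit in ar.unat bit (X & 0x1ff)/8, so two spill areas that are congruent modulo 512 use the same bit positions. A tiny sketch (the addresses are made up):

```c
#include <stdio.h>

/* Bit in ar.unat that records the NaT bit of a register spilled to address x. */
static int unat_bit(unsigned long x)
{
	return (x & 0x1ff) / 8;
}

int main(void)
{
	unsigned long parent_slot = 0xe000000012340428UL;	/* hypothetical */
	unsigned long child_slot  = 0xe000000056780428UL;	/* congruent mod 512 */

	/* Both map to bit 5, so copying the parent's unat value is enough. */
	printf("%d %d\n", unat_bit(parent_slot), unat_bit(child_slot));
	return 0;
}
```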
...@@ -474,7 +474,7 @@ threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, ...@@ -474,7 +474,7 @@ threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end,
{ {
struct switch_stack *sw; struct switch_stack *sw;
unsigned long urbs_end; unsigned long urbs_end;
struct task_struct *p; struct task_struct *g, *p;
struct mm_struct *mm; struct mm_struct *mm;
struct pt_regs *pt; struct pt_regs *pt;
long multi_threaded; long multi_threaded;
...@@ -495,7 +495,7 @@ threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, ...@@ -495,7 +495,7 @@ threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end,
} else { } else {
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
{ {
for_each_task(p) { do_each_thread(g, p) {
if (p->mm == mm && p->state != TASK_RUNNING) { if (p->mm == mm && p->state != TASK_RUNNING) {
sw = (struct switch_stack *) (p->thread.ksp + 16); sw = (struct switch_stack *) (p->thread.ksp + 16);
pt = ia64_task_regs(p); pt = ia64_task_regs(p);
...@@ -504,7 +504,7 @@ threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, ...@@ -504,7 +504,7 @@ threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end,
if (make_writable) if (make_writable)
user_flushrs(p, pt); user_flushrs(p, pt);
} }
} } while_each_thread(g, p);
} }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
......
...@@ -476,7 +476,6 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse ...@@ -476,7 +476,6 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
long long
ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{ {
struct signal_struct *sig;
struct k_sigaction *ka; struct k_sigaction *ka;
siginfo_t info; siginfo_t info;
long restart = in_syscall; long restart = in_syscall;
...@@ -503,7 +502,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ...@@ -503,7 +502,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
} }
} else } else
#endif #endif
if (scr->pt.r10 != -1) { if (scr->pt.r10 != -1)
/* /*
* A system calls has to be restarted only if one of the error codes * A system calls has to be restarted only if one of the error codes
* ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
...@@ -511,101 +510,14 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ...@@ -511,101 +510,14 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
* restart the syscall, so we can clear the "restart" flag here. * restart the syscall, so we can clear the "restart" flag here.
*/ */
restart = 0; restart = 0;
}
for (;;) {
unsigned long signr;
spin_lock_irq(&current->sigmask_lock); while (1) {
signr = dequeue_signal(&current->blocked, &info); int signr = get_signal_to_deliver(&info, &scr->pt);
spin_unlock_irq(&current->sigmask_lock);
if (!signr) if (signr <= 0)
break; break;
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
/* Let the debugger run. */
current->exit_code = signr;
current->thread.siginfo = &info;
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
signr = current->exit_code;
current->thread.siginfo = 0;
/* We're back. Did the debugger cancel the sig? */
if (!signr)
continue;
current->exit_code = 0;
/* The debugger continued. Ignore SIGSTOP. */
if (signr == SIGSTOP)
continue;
/* Update the siginfo structure. Is this good? */
if (signr != info.si_signo) {
info.si_signo = signr;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = current->parent->pid;
info.si_uid = current->parent->uid;
}
/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr)) {
send_sig_info(signr, &info, current);
continue;
}
}
ka = &current->sig->action[signr - 1]; ka = &current->sig->action[signr - 1];
if (ka->sa.sa_handler == SIG_IGN) {
if (signr != SIGCHLD)
continue;
/* Check for SIGCHLD: it's special. */
while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
/* nothing */;
continue;
}
if (ka->sa.sa_handler == SIG_DFL) {
int exit_code = signr;
/* Init gets no signals it doesn't want. */
if (current->pid == 1)
continue;
switch (signr) {
case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
continue;
case SIGTSTP: case SIGTTIN: case SIGTTOU:
if (is_orphaned_pgrp(current->pgrp))
continue;
/* FALLTHRU */
case SIGSTOP:
current->state = TASK_STOPPED;
current->exit_code = signr;
sig = current->parent->sig;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
continue;
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV:
case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
if (do_coredump(signr, &scr->pt))
exit_code |= 0x80;
/* FALLTHRU */
default:
sig_exit(signr, exit_code, &info);
/* NOTREACHED */
}
}
if (restart) { if (restart) {
switch (errno) { switch (errno) {
...@@ -632,9 +544,10 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ...@@ -632,9 +544,10 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
} }
} }
/* Whee! Actually deliver the signal. If the /*
delivery failed, we need to continue to iterate in * Whee! Actually deliver the signal. If the delivery failed, we need to
this loop so we can deliver the SIGSEGV... */ * continue to iterate in this loop so we can deliver the SIGSEGV...
*/
if (handle_signal(signr, ka, &info, oldset, scr)) if (handle_signal(signr, ka, &info, oldset, scr))
return 1; return 1;
} }
...@@ -650,9 +563,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) ...@@ -650,9 +563,8 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
} else } else
#endif #endif
/* /*
* Note: the syscall number is in r15 which is * Note: the syscall number is in r15 which is saved in pt_regs so
* saved in pt_regs so all we need to do here * all we need to do here is adjust ip so that the "break"
* is adjust ip so that the "break"
* instruction gets re-executed. * instruction gets re-executed.
*/ */
ia64_decrement_ip(&scr->pt); ia64_decrement_ip(&scr->pt);
......
...@@ -257,31 +257,6 @@ smp_setup_percpu_timer (void) ...@@ -257,31 +257,6 @@ smp_setup_percpu_timer (void)
local_cpu_data->prof_multiplier = 1; local_cpu_data->prof_multiplier = 1;
} }
/*
* Architecture specific routine called by the kernel just before init is
* fired off. This allows the BP to have everything in order [we hope].
* At the end of this all the APs will hit the system scheduling and off
* we go. Each AP will jump through the kernel
* init into idle(). At this point the scheduler will one day take over
* and give them jobs to do. smp_callin is a standard routine
* we use to track CPUs as they power up.
*/
static volatile atomic_t smp_commenced = ATOMIC_INIT(0);
static void __init
smp_commence (void)
{
/*
* Lets the callins below out of their loop.
*/
Dprintk("Setting commenced=1, go go go\n");
wmb();
atomic_set(&smp_commenced, 1);
}
static void __init static void __init
smp_callin (void) smp_callin (void)
{ {
...@@ -361,7 +336,7 @@ fork_by_hand (void) ...@@ -361,7 +336,7 @@ fork_by_hand (void)
* don't care about the eip and regs settings since we'll never reschedule the * don't care about the eip and regs settings since we'll never reschedule the
* forked task. * forked task.
*/ */
return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0);
return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL);
} }
static int __init static int __init
......
...@@ -19,6 +19,12 @@ ...@@ -19,6 +19,12 @@
#include <asm/shmparam.h> #include <asm/shmparam.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#ifdef CONFIG_HUGETLB_PAGE
# define SHMLBA_HPAGE HPAGE_SIZE
# define COLOR_HALIGN(addr) (((addr) + SHMLBA_HPAGE - 1) & ~(SHMLBA_HPAGE - 1))
# define TASK_HPAGE_BASE ((REGION_HPAGE << REGION_SHIFT) | HPAGE_SIZE)
#endif
unsigned long unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len, arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags) unsigned long pgoff, unsigned long flags)
...@@ -56,16 +62,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len ...@@ -56,16 +62,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
} }
asmlinkage long asmlinkage long
ia64_getpriority (int which, int who, long arg2, long arg3, long arg4, long arg5, long arg6, ia64_getpriority (int which, int who)
long arg7, long stack)
{ {
struct pt_regs *regs = (struct pt_regs *) &stack;
extern long sys_getpriority (int, int); extern long sys_getpriority (int, int);
long prio; long prio;
prio = sys_getpriority(which, who); prio = sys_getpriority(which, who);
if (prio >= 0) { if (prio >= 0) {
regs->r8 = 0; /* ensure negative priority is not mistaken as error code */ force_successful_syscall_return();
prio = 20 - prio; prio = 20 - prio;
} }
return prio; return prio;
...@@ -79,10 +83,8 @@ sys_getpagesize (void) ...@@ -79,10 +83,8 @@ sys_getpagesize (void)
} }
asmlinkage unsigned long asmlinkage unsigned long
ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg5, long arg6, ia64_shmat (int shmid, void *shmaddr, int shmflg)
long arg7, long stack)
{ {
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long raddr; unsigned long raddr;
int retval; int retval;
...@@ -90,16 +92,14 @@ ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg ...@@ -90,16 +92,14 @@ ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg
if (retval < 0) if (retval < 0)
return retval; return retval;
regs->r8 = 0; /* ensure negative addresses are not mistaken as an error code */ force_successful_syscall_return();
return raddr; return raddr;
} }
asmlinkage unsigned long asmlinkage unsigned long
ia64_brk (unsigned long brk, long arg1, long arg2, long arg3, ia64_brk (unsigned long brk)
long arg4, long arg5, long arg6, long arg7, long stack)
{ {
extern int vm_enough_memory (long pages); extern int vm_enough_memory (long pages);
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long rlim, retval, newbrk, oldbrk; unsigned long rlim, retval, newbrk, oldbrk;
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
...@@ -145,7 +145,7 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3, ...@@ -145,7 +145,7 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
out: out:
retval = mm->brk; retval = mm->brk;
up_write(&mm->mmap_sem); up_write(&mm->mmap_sem);
regs->r8 = 0; /* ensure large retval isn't mistaken as error code */ force_successful_syscall_return();
return retval; return retval;
} }
...@@ -222,32 +222,98 @@ out: if (file) ...@@ -222,32 +222,98 @@ out: if (file)
* of) files that are larger than the address space of the CPU. * of) files that are larger than the address space of the CPU.
*/ */
asmlinkage unsigned long asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff, sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
long arg6, long arg7, long stack)
{ {
struct pt_regs *regs = (struct pt_regs *) &stack;
addr = do_mmap2(addr, len, prot, flags, fd, pgoff); addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
if (!IS_ERR((void *) addr)) if (!IS_ERR((void *) addr))
regs->r8 = 0; /* ensure large addresses are not mistaken as failures... */ force_successful_syscall_return();
return addr; return addr;
} }
asmlinkage unsigned long asmlinkage unsigned long
sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
int fd, long off, long arg6, long arg7, long stack)
{ {
struct pt_regs *regs = (struct pt_regs *) &stack;
if ((off & ~PAGE_MASK) != 0) if ((off & ~PAGE_MASK) != 0)
return -EINVAL; return -EINVAL;
addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
if (!IS_ERR((void *) addr)) if (!IS_ERR((void *) addr))
regs->r8 = 0; /* ensure large addresses are not mistaken as failures... */ force_successful_syscall_return();
return addr; return addr;
} }
#ifdef CONFIG_HUGETLB_PAGE
asmlinkage unsigned long
sys_alloc_hugepages (int key, unsigned long addr, size_t len, int prot, int flag)
{
struct mm_struct *mm = current->mm;
long retval;
extern int alloc_hugetlb_pages (int, unsigned long, unsigned long, int, int);
if ((key < 0) || (len & (HPAGE_SIZE - 1)))
return -EINVAL;
if (addr && ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1))))
addr = TASK_HPAGE_BASE;
if (!addr)
addr = TASK_HPAGE_BASE;
down_write(&mm->mmap_sem);
{
retval = arch_get_unmapped_area(NULL, COLOR_HALIGN(addr), len, 0, 0);
if (retval != -ENOMEM)
retval = alloc_hugetlb_pages(key, retval, len, prot, flag);
}
up_write(&mm->mmap_sem);
if (IS_ERR((void *) retval))
return retval;
force_successful_syscall_return();
return retval;
}
asmlinkage int
sys_free_hugepages (unsigned long addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
extern int free_hugepages(struct vm_area_struct *);
int retval;
vma = find_vma(mm, addr);
if (!vma || !is_vm_hugetlb_page(vma) || (vma->vm_start != addr))
return -EINVAL;
down_write(&mm->mmap_sem);
{
spin_lock(&mm->page_table_lock);
{
retval = free_hugepages(vma);
}
spin_unlock(&mm->page_table_lock);
}
up_write(&mm->mmap_sem);
return retval;
}
#else /* !CONFIG_HUGETLB_PAGE */
asmlinkage unsigned long
sys_alloc_hugepages (int key, size_t addr, unsigned long len, int prot, int flag)
{
return -ENOSYS;
}
asmlinkage unsigned long
sys_free_hugepages (unsigned long addr)
{
return -ENOSYS;
}
#endif /* !CONFIG_HUGETLB_PAGE */
asmlinkage long asmlinkage long
sys_vm86 (long arg0, long arg1, long arg2, long arg3) sys_vm86 (long arg0, long arg1, long arg2, long arg3)
{ {
...@@ -256,16 +322,14 @@ sys_vm86 (long arg0, long arg1, long arg2, long arg3) ...@@ -256,16 +322,14 @@ sys_vm86 (long arg0, long arg1, long arg2, long arg3)
} }
asmlinkage unsigned long asmlinkage unsigned long
ia64_create_module (const char *name_user, size_t size, long arg2, long arg3, ia64_create_module (const char *name_user, size_t size)
long arg4, long arg5, long arg6, long arg7, long stack)
{ {
extern unsigned long sys_create_module (const char *, size_t); extern unsigned long sys_create_module (const char *, size_t);
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long addr; unsigned long addr;
addr = sys_create_module (name_user, size); addr = sys_create_module (name_user, size);
if (!IS_ERR((void *) addr)) if (!IS_ERR((void *) addr))
regs->r8 = 0; /* ensure large addresses are not mistaken as failures... */ force_successful_syscall_return();
return addr; return addr;
} }
......
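For what it's worth, the new syscalls can be exercised from user space through syscall(2) once the __NR_alloc_hugepages/__NR_free_hugepages numbers from the unistd.h hunk below are visible. A hedged sketch, assuming 16MB huge pages and that key 0 requests an ordinary private mapping (the key semantics are not spelled out in this patch):

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>

#define HPAGE_SIZE (1UL << 24)	/* must match the kernel's HPAGE_SHIFT */

int main(void)
{
	/* sys_alloc_hugepages(key, addr, len, prot, flag); addr 0 lets the
	 * kernel place the mapping at TASK_HPAGE_BASE. */
	long addr = syscall(__NR_alloc_hugepages, 0, 0UL, 2 * HPAGE_SIZE,
			    PROT_READ | PROT_WRITE, 0);
	if (addr == -1) {
		perror("alloc_hugepages");
		return 1;
	}
	memset((void *) addr, 0, 2 * HPAGE_SIZE);	/* touch the huge pages */
	if (syscall(__NR_free_hugepages, addr) == -1)
		perror("free_hugepages");
	return 0;
}
```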
...@@ -104,7 +104,8 @@ do_settimeofday (struct timeval *tv) ...@@ -104,7 +104,8 @@ do_settimeofday (struct timeval *tv)
tv->tv_sec--; tv->tv_sec--;
} }
xtime = *tv;
xtime.tv_sec = tv->tv_sec;
xtime.tv_nsec = 1000 * tv->tv_usec;
time_adjust = 0; /* stop active adjtime() */ time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC; time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT; time_maxerror = NTP_PHASE_LIMIT;
...@@ -135,7 +136,7 @@ do_gettimeofday (struct timeval *tv) ...@@ -135,7 +136,7 @@ do_gettimeofday (struct timeval *tv)
} while (cmpxchg(&last_time_offset, old, usec) != old); } while (cmpxchg(&last_time_offset, old, usec) != old);
sec = xtime.tv_sec; sec = xtime.tv_sec;
usec += xtime.tv_usec;
usec += xtime.tv_nsec / 1000;
} }
read_unlock_irqrestore(&xtime_lock, flags); read_unlock_irqrestore(&xtime_lock, flags);
......
...@@ -10,5 +10,6 @@ ...@@ -10,5 +10,6 @@
O_TARGET := mm.o O_TARGET := mm.o
obj-y := init.o fault.o tlb.o extable.o obj-y := init.o fault.o tlb.o extable.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
...@@ -339,6 +339,14 @@ ia64_mmu_init (void *my_cpu_data) ...@@ -339,6 +339,14 @@ ia64_mmu_init (void *my_cpu_data)
/* /*
* Set up the page tables. * Set up the page tables.
*/ */
#ifdef CONFIG_HUGETLB_PAGE
long htlbpagemem;
int htlbpage_max;
extern long htlbzone_pages;
extern struct list_head htlbpage_freelist;
#endif
void void
paging_init (void) paging_init (void)
{ {
...@@ -439,4 +447,29 @@ mem_init (void) ...@@ -439,4 +447,29 @@ mem_init (void)
#ifdef CONFIG_IA32_SUPPORT #ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init(); ia32_gdt_init();
#endif #endif
#ifdef CONFIG_HUGETLB_PAGE
{
long i;
int j;
struct page *page, *map;
if ((htlbzone_pages << (HPAGE_SHIFT - PAGE_SHIFT)) >= max_low_pfn)
htlbzone_pages = (max_low_pfn >> ((HPAGE_SHIFT - PAGE_SHIFT) + 1));
INIT_LIST_HEAD(&htlbpage_freelist);
for (i = 0; i < htlbzone_pages; i++) {
page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
if (!page)
break;
map = page;
for (j = 0; j < (HPAGE_SIZE/PAGE_SIZE); j++) {
SetPageReserved(map);
map++;
}
list_add(&page->list, &htlbpage_freelist);
}
printk("Total Huge_TLB_Page memory pages allocated %ld \n", i);
htlbzone_pages = htlbpagemem = i;
htlbpage_max = (int)i;
}
#endif
} }
...@@ -63,7 +63,7 @@ wrap_mmu_context (struct mm_struct *mm) ...@@ -63,7 +63,7 @@ wrap_mmu_context (struct mm_struct *mm)
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
repeat: repeat:
for_each_task(tsk) {
for_each_process(tsk) {
if (!tsk->mm) if (!tsk->mm)
continue; continue;
tsk_context = tsk->mm->context; tsk_context = tsk->mm->context;
......
...@@ -9,6 +9,7 @@ BEGIN { ...@@ -9,6 +9,7 @@ BEGIN {
print " */" print " */"
print "" print ""
print "#define CLONE_IDLETASK_BIT 12" print "#define CLONE_IDLETASK_BIT 12"
print "#define CLONE_SETTLS_BIT 19"
} }
# look for .tab: # look for .tab:
......
...@@ -202,6 +202,7 @@ main (int argc, char **argv) ...@@ -202,6 +202,7 @@ main (int argc, char **argv)
} }
printf ("\n#define CLONE_IDLETASK_BIT %ld\n", ia64_fls (CLONE_IDLETASK)); printf ("\n#define CLONE_IDLETASK_BIT %ld\n", ia64_fls (CLONE_IDLETASK));
printf ("\n#define CLONE_SETTLS_BIT %ld\n", ia64_fls (CLONE_SETTLS));
printf ("\n#endif /* _ASM_IA64_OFFSETS_H */\n"); printf ("\n#endif /* _ASM_IA64_OFFSETS_H */\n");
return 0; return 0;
......
...@@ -83,6 +83,7 @@ ...@@ -83,6 +83,7 @@
#define hardirq_trylock() (!in_interrupt()) #define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0) #define hardirq_endlock() do { } while (0)
#define in_atomic() (preempt_count() != 0)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET) #define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT #if CONFIG_PREEMPT
......
...@@ -90,20 +90,6 @@ ide_init_default_hwifs (void) ...@@ -90,20 +90,6 @@ ide_init_default_hwifs (void)
#endif #endif
} }
#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
#define ide_check_region(from,extent) check_region((from), (extent))
#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
#define ide_release_region(from,extent) release_region((from), (extent))
/*
* The following are not needed for the non-m68k ports
*/
#define ide_ack_intr(hwif) (1)
#define ide_fix_driveid(id) do {} while (0)
#define ide_release_lock(lock) do {} while (0)
#define ide_get_lock(lock, hdlr, data) do {} while (0)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __ASM_IA64_IDE_H */ #endif /* __ASM_IA64_IDE_H */
...@@ -110,6 +110,10 @@ reload_context (struct mm_struct *mm) ...@@ -110,6 +110,10 @@ reload_context (struct mm_struct *mm)
rr2 = rr0 + 2*rid_incr; rr2 = rr0 + 2*rid_incr;
rr3 = rr0 + 3*rid_incr; rr3 = rr0 + 3*rid_incr;
rr4 = rr0 + 4*rid_incr; rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
rr4 = (rr4 & (~(0xfcUL))) | (HPAGE_SHIFT << 2);
#endif
ia64_set_rr(0x0000000000000000, rr0); ia64_set_rr(0x0000000000000000, rr0);
ia64_set_rr(0x2000000000000000, rr1); ia64_set_rr(0x2000000000000000, rr1);
ia64_set_rr(0x4000000000000000, rr2); ia64_set_rr(0x4000000000000000, rr2);
......
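The mask above rewrites only the preferred-page-size field of region register 4, the region that REGION_HPAGE selects (see the page.h hunk below). A one-line sketch, assuming the usual ia64 region-register layout with ve in bit 0 and ps in bits 2-7:

```c
/* Assumed region-register layout: ve = bit 0, ps = bits 2-7, rid = bits 8 and up. */
static unsigned long rr_with_hpage_size(unsigned long rr, unsigned long hpage_shift)
{
	return (rr & ~0xfcUL) | (hpage_shift << 2);	/* replace ps, keep ve and rid */
}
```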
...@@ -30,6 +30,32 @@ ...@@ -30,6 +30,32 @@
#define PAGE_MASK (~(PAGE_SIZE - 1)) #define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#ifdef CONFIG_HUGETLB_PAGE
# if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB)
# define HPAGE_SHIFT 32
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256MB)
# define HPAGE_SHIFT 28
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
# define HPAGE_SHIFT 26
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_16MB)
# define HPAGE_SHIFT 24
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
# define HPAGE_SHIFT 22
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
# define HPAGE_SHIFT 20
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256KB)
# define HPAGE_SHIFT 18
# else
# error Unsupported IA-64 HugeTLB Page Size!
# endif
# define REGION_HPAGE (4UL) /* note: this is hardcoded in mmu_context.h:reload_context()!*/
# define REGION_SHIFT 61
# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE - 1))
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
# define __pa(x) ((x) - PAGE_OFFSET) # define __pa(x) ((x) - PAGE_OFFSET)
# define __va(x) ((x) + PAGE_OFFSET) # define __va(x) ((x) + PAGE_OFFSET)
...@@ -87,6 +113,12 @@ typedef union ia64_va { ...@@ -87,6 +113,12 @@ typedef union ia64_va {
#define REGION_SIZE REGION_NUMBER(1) #define REGION_SIZE REGION_NUMBER(1)
#define REGION_KERNEL 7 #define REGION_KERNEL 7
#ifdef CONFIG_HUGETLB_PAGE
# define htlbpage_to_page(x) ((REGION_NUMBER(x) << 61) \
| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#endif
#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1) #if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
# define ia64_abort() __builtin_trap() # define ia64_abort() __builtin_trap()
#else #else
......
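Plugging in the default configuration gives concrete numbers for the constants above; PAGE_SHIFT == 14 (16KB base pages) is an assumption, the rest follows from the definitions in this hunk and from TASK_HPAGE_BASE in sys.c:

```c
#include <stdio.h>

#define PAGE_SHIFT	14	/* assumption: 16KB base pages */
#define HPAGE_SHIFT	24	/* CONFIG_HUGETLB_PAGE_SIZE_16MB */
#define REGION_HPAGE	4UL
#define REGION_SHIFT	61

int main(void)
{
	unsigned long hpage_size = 1UL << HPAGE_SHIFT;
	unsigned long order      = HPAGE_SHIFT - PAGE_SHIFT;
	unsigned long hpage_base = (REGION_HPAGE << REGION_SHIFT) | hpage_size;

	printf("HPAGE_SIZE         = %lu MB\n", hpage_size >> 20);	/* 16 */
	printf("HUGETLB_PAGE_ORDER = %lu\n", order);			/* 10 */
	printf("TASK_HPAGE_BASE    = 0x%lx\n", hpage_base);		/* region 4 */
	return 0;
}
```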
...@@ -236,7 +236,15 @@ struct thread_struct { ...@@ -236,7 +236,15 @@ struct thread_struct {
__u64 ssd; /* IA32 stack selector descriptor */ __u64 ssd; /* IA32 stack selector descriptor */
__u64 old_k1; /* old value of ar.k1 */ __u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */ __u64 old_iob; /* old IOBase value */
# define INIT_THREAD_IA32 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0, # define INIT_THREAD_IA32 .eflag = 0, \
.fsr = 0, \
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
.csd = 0, \
.ssd = 0, \
.old_k1 = 0, \
.old_iob = 0,
#else #else
# define INIT_THREAD_IA32 # define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */ #endif /* CONFIG_IA32_SUPPORT */
...@@ -248,7 +256,13 @@ struct thread_struct { ...@@ -248,7 +256,13 @@ struct thread_struct {
atomic_t pfm_notifiers_check; /* when >0, will cleanup ctx_notify_task in tasklist */ atomic_t pfm_notifiers_check; /* when >0, will cleanup ctx_notify_task in tasklist */
atomic_t pfm_owners_check; /* when >0, will cleanup ctx_owner in tasklist */ atomic_t pfm_owners_check; /* when >0, will cleanup ctx_owner in tasklist */
void *pfm_smpl_buf_list; /* list of sampling buffers to vfree */ void *pfm_smpl_buf_list; /* list of sampling buffers to vfree */
# define INIT_THREAD_PM {0, }, {0, }, 0, NULL, {0}, {0}, NULL, # define INIT_THREAD_PM .pmc = {0, }, \
.pmd = {0, }, \
.pfm_ovfl_block_reset = 0, \
.pfm_context = NULL, \
.pfm_notifiers_check = { 0 }, \
.pfm_owners_check = { 0 }, \
.pfm_smpl_buf_list = NULL,
#else #else
# define INIT_THREAD_PM # define INIT_THREAD_PM
#endif #endif
...@@ -258,16 +272,17 @@ struct thread_struct { ...@@ -258,16 +272,17 @@ struct thread_struct {
}; };
#define INIT_THREAD { \ #define INIT_THREAD { \
flags: 0, \ .flags = 0, \
ksp: 0, \ .ksp = 0, \
map_base: DEFAULT_MAP_BASE, \ .map_base = DEFAULT_MAP_BASE, \
task_size: DEFAULT_TASK_SIZE, \ .task_size = DEFAULT_TASK_SIZE, \
siginfo: 0, \ .siginfo = 0, \
.last_fph_cpu = 0, \
INIT_THREAD_IA32 \ INIT_THREAD_IA32 \
INIT_THREAD_PM \ INIT_THREAD_PM \
dbr: {0, }, \ .dbr = {0, }, \
ibr: {0, }, \ .ibr = {0, }, \
fph: {{{{0}}}, } \ .fph = {{{{0}}}, } \
} }
#define start_thread(regs,new_ip,new_sp) do { \ #define start_thread(regs,new_ip,new_sp) do { \
......
...@@ -166,7 +166,6 @@ struct k_sigaction { ...@@ -166,7 +166,6 @@ struct k_sigaction {
# include <asm/sigcontext.h> # include <asm/sigcontext.h>
#define HAVE_ARCH_GET_SIGNAL_TO_DELIVER
#define HAVE_ARCH_SYS_PAUSE #define HAVE_ARCH_SYS_PAUSE
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -109,6 +109,7 @@ typedef struct { ...@@ -109,6 +109,7 @@ typedef struct {
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x) (*(volatile int *) (x) != 0)
#define _raw_read_lock(rw) \ #define _raw_read_lock(rw) \
do { \ do { \
......
...@@ -174,6 +174,13 @@ do { \ ...@@ -174,6 +174,13 @@ do { \
#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory") #define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory") #define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
(flags & IA64_PSR_I) == 0; \
})
/* /*
* Force an unresolved reference if someone tries to use * Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value. * ia64_fetch_and_add() with a bad value.
......
...@@ -3,15 +3,19 @@ ...@@ -3,15 +3,19 @@
/* /*
* Copyright (C) 1998-2001 Hewlett-Packard Co * Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
/* /*
* 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64. * 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64.
* Also removed cacheflush_time as it's entirely unused. * Also removed cacheflush_time as it's entirely unused.
*/ */
#include <asm/processor.h>
typedef unsigned long cycles_t; typedef unsigned long cycles_t;
#define CLOCK_TICK_RATE 100000000
static inline cycles_t static inline cycles_t
get_cycles (void) get_cycles (void)
{ {
......
...@@ -199,4 +199,8 @@ tlb_remove_page (mmu_gather_t *tlb, struct page *page) ...@@ -199,4 +199,8 @@ tlb_remove_page (mmu_gather_t *tlb, struct page *page)
#define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr) __tlb_remove_tlb_entry(tlb, ptep, addr)
#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
#define pmd_free_tlb(tlb, ptep) __pmd_free_tlb(tlb, ptep)
#endif /* _ASM_IA64_TLB_H */ #endif /* _ASM_IA64_TLB_H */
...@@ -225,7 +225,7 @@ ...@@ -225,7 +225,7 @@
#define __NR_security 1233 #define __NR_security 1233
#define __NR_alloc_hugepages 1234 #define __NR_alloc_hugepages 1234
#define __NR_free_hugepages 1235 #define __NR_free_hugepages 1235
/* 1236 currently unused */
#define __NR_exit_group 1236
/* 1237 currently unused */ /* 1237 currently unused */
#define __NR_io_setup 1238 #define __NR_io_setup 1238
#define __NR_io_destroy 1239 #define __NR_io_destroy 1239
......