Commit 413059f2 authored by Grant Grundler, committed by Kyle McMartin

[PARISC] Replace uses of __LP64__ with CONFIG_64BIT

2.6.12-rc4-pa3: s/__LP64__/CONFIG_64BIT/ and fix up config.h usage
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent 34994952
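
Why the rename matters: __LP64__ is a predefined compiler macro that only exists when the toolchain itself targets an LP64 ABI, so assembler sources see it inconsistently, while CONFIG_64BIT is a Kconfig symbol visible to every file, .c or .S, through the config headers. A minimal sketch of the resulting idiom, assuming the LDREG/STREG naming convention of arch/parisc's assembly.h (illustration only, not the patch itself):

/* sketch: Kconfig-driven register-width selection for parisc asm */
#include <linux/config.h>	/* pre-2.6.15 home of the CONFIG_* symbols */

#ifdef CONFIG_64BIT
#define LDREG	ldd		/* wide kernel: 64-bit load/store */
#define STREG	std
#else
#define LDREG	ldw		/* narrow kernel: 32-bit load/store */
#define STREG	stw
#endif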
@@ -12,7 +12,7 @@
* Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
*/
-#include <linux/autoconf.h> /* for CONFIG_SMP */
+#include <linux/config.h> /* for CONFIG_SMP */
#include <asm/asm-offsets.h>
#include <asm/psw.h>
@@ -36,10 +36,10 @@ boot_args:
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
.export stext
.export _stext,data /* Kernel wants it this way! */
_stext:
@@ -76,7 +76,7 @@ $bss_loop:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
@@ -99,7 +99,7 @@ $bss_loop:
stw %r3,0(%r4)
ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -170,7 +170,7 @@ common_stext:
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
tophys_r1 %sp
/* Save the rfi target address */
@@ -233,7 +233,7 @@ stext_pdc_ret:
* following short sequence of instructions can determine this
* (without being illegal on a PA1.1 machine).
*/
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
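
The probe above exploits the shift-amount register (%cr11, the SAR), which is 5 bits wide on PA1.1 and 6 bits wide on PA2.0: write 32, read it back, and a narrow SAR returns 0 where a wide one returns 32, with no PA2.0-only instruction executed. A hedged C analogue, with write_sar/read_sar as hypothetical stand-ins for mtctl/mfctl on %cr11:

extern void write_sar(unsigned int v);	/* hypothetical: mtctl %rN,%cr11 */
extern unsigned int read_sar(void);	/* hypothetical: mfctl %cr11,%rN */

static int is_pa20(void)
{
	write_sar(32);
	/* PA1.1 SAR keeps 5 bits -> reads back 0; PA2.0 keeps 6 -> 32 */
	return read_sar() != 0;
}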
@@ -246,7 +246,7 @@ stext_pdc_ret:
$is_pa20:
.level LEVEL /* restore 1.1 || 2.0w */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
$install_iva:
@@ -284,7 +284,7 @@ aligned_rfi:
.import smp_init_current_idle_task,data
.import smp_callin,code
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
smp_callin_rtn:
.proc
.callinfo
@@ -292,7 +292,7 @@ smp_callin_rtn:
nop
nop
.procend
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
@@ -327,7 +327,7 @@ smp_slave_stext:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Setup PDCE_PROC entry */
copy %arg0,%r3
#else
@@ -344,7 +344,7 @@ smp_slave_stext:
.procend
#endif /* CONFIG_SMP */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.data
.align 4
@@ -354,4 +354,4 @@ smp_slave_stext:
.size $global$,4
$global$:
.word 0
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
@@ -26,7 +26,7 @@
* can be used.
*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#define ADDIB addib,*
#define CMPB cmpb,*
#define ANDCM andcm,*
@@ -40,6 +40,8 @@
.level 2.0
#endif
+#include <linux/config.h>
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
@@ -294,7 +296,7 @@ copy_user_page_asm:
.callinfo NO_CALLS
.entry
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
* GCC probably can do this just as well.
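
A rough C analogue of the scheduling idea in that comment, simplified and hypothetical (the real routine works on physical addresses, in assembly, with explicit TLB handling):

/* sketch: unroll so paired loads can dual-issue ahead of the stores */
void copy_page_unrolled(unsigned long *dst, const unsigned long *src,
			unsigned long words)
{
	unsigned long i;
	for (i = 0; i < words; i += 4) {
		unsigned long a = src[i + 0];	/* two loads per cycle */
		unsigned long b = src[i + 1];
		unsigned long c = src[i + 2];
		unsigned long d = src[i + 3];
		dst[i + 0] = a;			/* one store per cycle */
		dst[i + 1] = b;
		dst[i + 2] = c;
		dst[i + 3] = d;
	}
}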
@@ -454,7 +456,7 @@ copy_user_page_asm:
sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
@@ -541,7 +543,7 @@ __clear_user_page_asm:
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
@@ -558,7 +560,7 @@ __clear_user_page_asm:
pdtlb 0(%r28)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
/* PREFETCH (Write) has not (yet) been proven to help here */
@@ -583,7 +585,7 @@ __clear_user_page_asm:
ADDIB> -1, %r1, 1b
ldo 128(%r28), %r28
-#else /* ! __LP64 */
+#else /* ! CONFIG_64BIT */
ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
@@ -606,7 +608,7 @@ __clear_user_page_asm:
stw %r0, 60(%r28)
ADDIB> -1, %r1, 1b
ldo 64(%r28), %r28
-#endif /* __LP64 */
+#endif /* CONFIG_64BIT */
bv %r0(%r2)
nop
@@ -624,7 +626,7 @@ flush_kernel_dcache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -668,7 +670,7 @@ flush_user_dcache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1,63-PAGE_SHIFT,1, %r25
#else
depwi,z 1,31-PAGE_SHIFT,1, %r25
@@ -712,7 +714,7 @@ flush_user_icache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -757,7 +759,7 @@ purge_kernel_dcache_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -805,7 +807,7 @@ flush_alias_page:
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,12, %r28 /* Clear any offset bits */
@@ -822,7 +824,7 @@ flush_alias_page:
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r29
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r29
@@ -933,7 +935,7 @@ flush_kernel_icache_page:
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
@@ -7,6 +7,8 @@
* Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
*
*/
+#include <linux/config.h>
#include <asm/psw.h>
#include <asm/assembly.h>
@@ -20,7 +22,7 @@ real32_stack:
real64_stack:
.block 8192
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
# define REG_SZ 8
#else
# define REG_SZ 4
@@ -50,7 +52,7 @@ save_cr_end:
real32_call_asm:
STREG %rp, -RP_OFFSET(%sp) /* save RP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
callee_save
ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
STREG %r27, -1*REG_SZ(%sp)
@@ -77,7 +79,7 @@ real32_call_asm:
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
rsm PSW_SM_W, %r0 /* go narrow */
#endif
@@ -85,7 +87,7 @@ real32_call_asm:
bv 0(%r31)
nop
ric_ret:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ssm PSW_SM_W, %r0 /* go wide */
#endif
/* restore CRs before going virtual in case we page fault */
@@ -97,7 +99,7 @@ ric_ret:
tovirt_r1 %sp
LDREG -REG_SZ(%sp), %sp /* restore SP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG -1*REG_SZ(%sp), %r27
LDREG -2*REG_SZ(%sp), %r29
ldo -2*REG_SZ(%sp), %sp
@@ -212,7 +214,7 @@ rfi_r2v_1:
bv 0(%r2)
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
@@ -290,7 +292,7 @@ pc_in_user_space:
** comparing function pointers.
*/
__canonicalize_funcptr_for_compare:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
@@ -18,7 +18,7 @@
*/
#undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */
-#include <linux/autoconf.h>
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/spinlock.h>
@@ -6,6 +6,7 @@
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
+#include <linux/config.h> /* for CONFIG_SMP */
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
@@ -22,15 +23,13 @@
*/
#define KILL_INSN break 0,0
-#include <linux/config.h> /* for CONFIG_SMP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.level 2.0w
#else
.level 1.1
#endif
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
.macro fixup_branch,lbl
b \lbl
.endm
@@ -103,7 +102,7 @@ linux_gateway_entry:
mfsp %sr7,%r1 /* save user sr7 */
mtsp %r1,%sr3 /* and store it in sr3 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
* since we don't support wide userland processes. We could
* also save the current SM other than in r0 and restore it on
@@ -155,7 +154,7 @@ linux_gateway_entry:
STREG %r19, TASK_PT_GR19(%r1)
LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
#if 0
xor %r19,%r2,%r2 /* clear bottom bit */
@@ -186,7 +185,7 @@ linux_gateway_entry:
loadgp
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
copy %r19,%r2 /* W bit back to r2 */
#else
@@ -205,7 +204,7 @@ linux_gateway_entry:
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
@@ -321,7 +320,7 @@ tracesys_next:
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
ldo -16(%r30),%r29 /* Reference param save area */
@@ -350,7 +349,7 @@ tracesys_next:
tracesys_exit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl syscall_trace, %r2
@@ -371,7 +370,7 @@ tracesys_exit:
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG 0(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl syscall_trace, %r2
@@ -404,7 +403,7 @@ lws_start:
gate .+8, %r0
depi 3, 31, 2, %r31 /* Ensure we return to userspace */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* FIXME: If we are a 64-bit kernel just
* turn this on unconditionally.
*/
@@ -440,7 +439,7 @@ lws_exit_nosys:
/* Fall through: Return to userspace */
lws_exit:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
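
This mirrors the syscall entry path above, where the comment "W hidden in bottom bit" appears: the user stack pointer is always at least word-aligned, so bit 0 is free to carry the PSW W (wide mode) bit across the gateway. A hedged sketch of the packing, names hypothetical:

/* sketch: smuggling the PSW W bit in bit 0 of the saved sp */
static unsigned long pack_sp(unsigned long usp, unsigned long w)
{
	return usp | (w & 1);		/* alignment keeps bit 0 free */
}

static unsigned long unpack_sp(unsigned long saved, unsigned long *w)
{
	*w = saved & 1;			/* recover W on the way out */
	return saved & ~1UL;		/* restore the real sp */
}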
@@ -486,7 +485,7 @@ lws_exit:
/* ELF64 Process entry path */
lws_compare_and_swap64:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
@@ -497,7 +496,7 @@ lws_compare_and_swap64:
/* ELF32 Process entry path */
lws_compare_and_swap32:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Clip all the input registers */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
@@ -608,7 +607,7 @@ cas_action:
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
.section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Pad the address calculation */
.word 0,(2b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
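
The pad word exists because exception-table slots are pointer-sized: each entry is an (insn, fixup) address pair, so on a wide kernel a 32-bit gateway-page offset needs a leading zero word to fill the upper half of its 8-byte slot. A hedged sketch of the two layouts (struct names invented for illustration):

/* sketch: one __ex_table entry = faulting insn + fixup address */
struct exentry32 { unsigned int insn, fixup; };		/* .word off    */
struct exentry64 { unsigned long insn, fixup; };	/* .word 0, off */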
@@ -619,7 +618,7 @@ cas_action:
.previous
.section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Pad the address calculation */
.word 0,(1b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
@@ -638,7 +637,7 @@ end_linux_gateway_page:
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* FIXME: The code will always be on the gateway page
and thus it will be on the first 4k, the
assembler seems to think that the final
@@ -666,7 +665,7 @@ lws_table:
sys_call_table:
#include "syscall_table.S"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
.align 4096
.export sys_call_table64
.Lsys_call_table64:
@@ -35,7 +35,7 @@
#undef ENTRY_UHOH
#undef ENTRY_COMP
#undef ENTRY_OURS
-#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT)
+#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
* narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
* implementation is required on wide palinux. Use ENTRY_COMP where
@@ -46,7 +46,7 @@
#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
#define ENTRY_OURS(_name_) .dword parisc_##_name_
#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
-#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT)
+#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys_##_name_
#define ENTRY_UHOH(_name_) .dword sys_##_name_
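
Each line of syscall_table.S invokes one of these macro families, so a single table source expands to the right pointer width and implementation per build. Worked expansions under the first branch above (a wide kernel emitting the 32-bit table); the syscall names are examples, not claims about specific table lines:

/* ENTRY_SAME(read)  -> .dword sys_read            same code either width */
/* ENTRY_COMP(ioctl) -> .dword compat_sys_ioctl    needs a compat wrapper */
/* ENTRY_OURS(fork)  -> .dword parisc_fork         parisc-private variant */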