Commit d8e7f201 authored by WANG Xuerui, committed by Huacai Chen

LoongArch: Use ABI names of registers where appropriate

Some of the assembly in the LoongArch port seems to come from a
prehistoric time, when the assembler did not yet support the ABI
names we have all come to know and love, and thus used raw register
numbers, which hampered readability.

The usages were found with a regex match inside arch/loongarch, then
adjusted by hand for the matches that are not register definitions.

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent e0dccc3b
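
For reference, here is a summary of the standard LoongArch psABI register names (my addition, not part of the commit); every raw name replaced in the hunks below is one of the first four:

/*
 * $r0          $zero     hard-wired zero
 * $r1          $ra       return address
 * $r2          $tp       thread pointer
 * $r3          $sp       stack pointer
 * $r4-$r11     $a0-$a7   argument/return registers
 * $r12-$r20    $t0-$t8   temporaries
 * $r22         $fp/$s9   frame pointer
 * $r23-$r31    $s0-$s8   callee-saved registers
 */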
@@ -48,9 +48,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 	__asm__ __volatile__(
 	"sltu	%0, %1, %2\n\t"
 #if (__SIZEOF_LONG__ == 4)
-	"sub.w	%0, $r0, %0\n\t"
+	"sub.w	%0, $zero, %0\n\t"
 #elif (__SIZEOF_LONG__ == 8)
-	"sub.d	%0, $r0, %0\n\t"
+	"sub.d	%0, $zero, %0\n\t"
 #endif
 	: "=r" (mask)
 	: "r" (index), "r" (size)
...
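
A minimal C sketch of what the asm above computes (my gloss, not from the patch): sltu sets the mask to 1 when index < size, and subtracting that from $zero yields an all-ones mask for an in-bounds index and all-zeroes otherwise, with no branch for speculation to bypass.

static inline unsigned long mask_nospec_sketch(unsigned long index,
                                               unsigned long size)
{
        unsigned long lt = (index < size);  /* sltu  %0, %1, %2           */
        return 0UL - lt;                    /* sub.d %0, $zero, %0 (LP64) */
}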
@@ -58,7 +58,7 @@ static inline void xconf_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile (
 	"	st.w	%[v], %[hw], 0	\n"
-	"	ld.b	$r0, %[hw], 0	\n"
+	"	ld.b	$zero, %[hw], 0	\n"
 	:
 	: [hw] "r" (addr), [v] "r" (val)
 	);
@@ -68,7 +68,7 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
 {
 	asm volatile (
 	"	st.d	%[v], %[hw], 0	\n"
-	"	ld.b	$r0, %[hw], 0	\n"
+	"	ld.b	$zero, %[hw], 0	\n"
 	:
 	: [hw] "r" (addr), [v] "r" (val64)
 	);
...
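
A hedged reading of the two xconf helpers above: the ld.b into $zero is a dummy read-back of the device register just written; $zero discards whatever is loaded, so the instruction exists only to force the posted write out before execution continues. Roughly, in C (a sketch under that assumption, not the kernel's code):

#include <stdint.h>

static inline void write_then_readback(volatile uint32_t *hw, uint32_t val)
{
        *hw = val;                      /* st.w %[v], %[hw], 0            */
        (void)*(volatile uint8_t *)hw;  /* ld.b $zero, %[hw], 0: value is */
                                        /* discarded, store is flushed    */
}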
@@ -23,13 +23,13 @@
 static __always_inline void prepare_frametrace(struct pt_regs *regs)
 {
 	__asm__ __volatile__(
-		/* Save $r1 */
+		/* Save $ra */
 		STORE_ONE_REG(1)
-		/* Use $r1 to save PC */
-		"pcaddi	$r1, 0\n\t"
-		STR_LONG_S " $r1, %0\n\t"
-		/* Restore $r1 */
-		STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+		/* Use $ra to save PC */
+		"pcaddi	$ra, 0\n\t"
+		STR_LONG_S " $ra, %0\n\t"
+		/* Restore $ra */
+		STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t"
 		STORE_ONE_REG(2)
 		STORE_ONE_REG(3)
 		STORE_ONE_REG(4)
...
@@ -44,14 +44,14 @@ struct thread_info {
 }
 
 /* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$r2");
+register struct thread_info *__current_thread_info __asm__("$tp");
 
 static inline struct thread_info *current_thread_info(void)
 {
 	return __current_thread_info;
 }
 
-register unsigned long current_stack_pointer __asm__("$r3");
+register unsigned long current_stack_pointer __asm__("$sp");
 
 #endif /* !__ASSEMBLY__ */
...
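
The two declarations in this hunk use GCC's global register variable extension: the variable is permanently bound to the named register, so reading it compiles to a plain register move with no memory access, which is why the string must name a register the toolchain recognizes. A minimal sketch (my illustration, hypothetical names):

/* Valid for GCC/Clang targeting LoongArch; sp_sketch is bound to $sp. */
register unsigned long sp_sketch __asm__("$sp");

static inline unsigned long read_sp_sketch(void)
{
        return sp_sketch;       /* emits something like: move rd, $sp */
}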
@@ -162,7 +162,7 @@ do {						\
 	"2:						\n"	\
 	"	.section .fixup,\"ax\"			\n"	\
 	"3:	li.w	%0, %3				\n"	\
-	"	or	%1, $r0, $r0			\n"	\
+	"	or	%1, $zero, $zero		\n"	\
 	"	b	2b				\n"	\
 	"	.previous				\n"	\
 	"	.section __ex_table,\"a\"		\n"	\
...
@@ -32,7 +32,7 @@ SYM_FUNC_START(clear_page)
 	st.d	zero, a0, -8
 	bne	t0, a0, 1b
 
-	jirl	$r0, ra, 0
+	jirl	zero, ra, 0
 SYM_FUNC_END(clear_page)
 EXPORT_SYMBOL(clear_page)
@@ -79,6 +79,6 @@ SYM_FUNC_START(copy_page)
 	st.d	t7, a0, -8
 	bne	t8, a0, 1b
 
-	jirl	$r0, ra, 0
+	jirl	zero, ra, 0
 SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
...
@@ -47,7 +47,7 @@ SYM_FUNC_START(handle_tlb_load)
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_load
+	blt	t0, zero, vmalloc_load
 	csrrd	t1, LOONGARCH_CSR_PGDL
 
 vmalloc_done_load:
@@ -80,7 +80,7 @@ vmalloc_done_load:
 	 * see if we need to jump to huge tlb processing.
 	 */
 	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_load
+	bne	t0, zero, tlb_huge_update_load
 	csrrd	t0, LOONGARCH_CSR_BADV
 	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -100,12 +100,12 @@ smp_pgtable_change_load:
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_load
+	beq	ra, zero, nopage_tlb_load
 
 	ori	t0, t0, _PAGE_VALID
 #ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_load
+	beq	t0, zero, smp_pgtable_change_load
 #else
 	st.d	t0, t1, 0
 #endif
@@ -139,23 +139,23 @@ tlb_huge_update_load:
 #endif
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_load
+	beq	ra, zero, nopage_tlb_load
 	tlbsrch
 
 	ori	t0, t0, _PAGE_VALID
 #ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_load
+	beq	t0, zero, tlb_huge_update_load
 	ld.d	t0, t1, 0
 #else
 	st.d	t0, t1, 0
 #endif
-	addu16i.d	t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
 	addi.d	ra, t1, 0
 	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
 	tlbwr
 
-	csrxchg	$r0, t1, LOONGARCH_CSR_TLBIDX
+	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
 
 	/*
 	 * A huge PTE describes an area the size of the
@@ -178,27 +178,27 @@ tlb_huge_update_load:
 	addi.d	t0, ra, 0
 
 	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
 	slli.d	t1, t1, (HPAGE_SHIFT - 1)
 	add.d	t0, t0, t1
 	csrwr	t0, LOONGARCH_CSR_TLBELO1
 
 	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
 
 	tlbfill
 
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
 
 nopage_tlb_load:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_0
-	jirl	$r0, t0, 0
+	jirl	zero, t0, 0
 SYM_FUNC_END(handle_tlb_load)
 
 SYM_FUNC_START(handle_tlb_store)
@@ -210,7 +210,7 @@ SYM_FUNC_START(handle_tlb_store)
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_store
+	blt	t0, zero, vmalloc_store
 	csrrd	t1, LOONGARCH_CSR_PGDL
 
 vmalloc_done_store:
@@ -244,7 +244,7 @@ vmalloc_done_store:
 	 * see if we need to jump to huge tlb processing.
 	 */
 	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_store
+	bne	t0, zero, tlb_huge_update_store
 	csrrd	t0, LOONGARCH_CSR_BADV
 	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -265,12 +265,12 @@ smp_pgtable_change_store:
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
 	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-	bne	ra, $r0, nopage_tlb_store
+	bne	ra, zero, nopage_tlb_store
 
 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_store
+	beq	t0, zero, smp_pgtable_change_store
 #else
 	st.d	t0, t1, 0
 #endif
@@ -306,24 +306,24 @@ tlb_huge_update_store:
 	srli.d	ra, t0, _PAGE_PRESENT_SHIFT
 	andi	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
 	xori	ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-	bne	ra, $r0, nopage_tlb_store
+	bne	ra, zero, nopage_tlb_store
 	tlbsrch
 
 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_store
+	beq	t0, zero, tlb_huge_update_store
 	ld.d	t0, t1, 0
 #else
 	st.d	t0, t1, 0
 #endif
-	addu16i.d	t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
+	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
 	addi.d	ra, t1, 0
 	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
 	tlbwr
 
-	csrxchg	$r0, t1, LOONGARCH_CSR_TLBIDX
+	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
 	/*
 	 * A huge PTE describes an area the size of the
 	 * configured huge page size. This is twice the
@@ -345,28 +345,28 @@ tlb_huge_update_store:
 	addi.d	t0, ra, 0
 
 	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
 	slli.d	t1, t1, (HPAGE_SHIFT - 1)
 	add.d	t0, t0, t1
 	csrwr	t0, LOONGARCH_CSR_TLBELO1
 
 	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
 
 	tlbfill
 
 	/* Reset default page size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
 
 nopage_tlb_store:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_1
-	jirl	$r0, t0, 0
+	jirl	zero, t0, 0
 SYM_FUNC_END(handle_tlb_store)
 
 SYM_FUNC_START(handle_tlb_modify)
@@ -378,7 +378,7 @@ SYM_FUNC_START(handle_tlb_modify)
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	csrrd	t0, LOONGARCH_CSR_BADV
-	blt	t0, $r0, vmalloc_modify
+	blt	t0, zero, vmalloc_modify
 	csrrd	t1, LOONGARCH_CSR_PGDL
 
 vmalloc_done_modify:
@@ -411,7 +411,7 @@ vmalloc_done_modify:
 	 * see if we need to jump to huge tlb processing.
 	 */
 	andi	t0, ra, _PAGE_HUGE
-	bne	t0, $r0, tlb_huge_update_modify
+	bne	t0, zero, tlb_huge_update_modify
 	csrrd	t0, LOONGARCH_CSR_BADV
 	srli.d	t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -431,12 +431,12 @@ smp_pgtable_change_modify:
 	srli.d	ra, t0, _PAGE_WRITE_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_modify
+	beq	ra, zero, nopage_tlb_modify
 
 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, smp_pgtable_change_modify
+	beq	t0, zero, smp_pgtable_change_modify
 #else
 	st.d	t0, t1, 0
 #endif
@@ -471,14 +471,14 @@ tlb_huge_update_modify:
 	srli.d	ra, t0, _PAGE_WRITE_SHIFT
 	andi	ra, ra, 1
-	beq	ra, $r0, nopage_tlb_modify
+	beq	ra, zero, nopage_tlb_modify
 	tlbsrch
 
 	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
 	sc.d	t0, t1, 0
-	beq	t0, $r0, tlb_huge_update_modify
+	beq	t0, zero, tlb_huge_update_modify
 	ld.d	t0, t1, 0
 #else
 	st.d	t0, t1, 0
@@ -504,28 +504,28 @@ tlb_huge_update_modify:
 	addi.d	t0, ra, 0
 
 	/* Convert to entrylo1 */
-	addi.d	t1, $r0, 1
+	addi.d	t1, zero, 1
 	slli.d	t1, t1, (HPAGE_SHIFT - 1)
 	add.d	t0, t0, t1
 	csrwr	t0, LOONGARCH_CSR_TLBELO1
 
 	/* Set huge page tlb entry size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
 
 	tlbwr
 
 	/* Reset default page size */
-	addu16i.d	t0, $r0, (CSR_TLBIDX_PS >> 16)
-	addu16i.d	t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
+	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX
 
 nopage_tlb_modify:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_1
-	jirl	$r0, t0, 0
+	jirl	zero, t0, 0
 SYM_FUNC_END(handle_tlb_modify)
 
 SYM_FUNC_START(handle_tlb_refill)
...
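
One pattern recurs across the TLB hunks (my gloss): each sc.d / beq pair is the store-conditional half of an LL/SC retry loop. sc.d writes only if the reservation taken by the earlier ll.d (in the elided context) still holds, and leaves 0 in its destination on failure, so the beq against zero branches back to retry. A self-contained C rendering using the GCC atomic builtins, which compile to a comparable ll/sc loop on LoongArch (a sketch; 1UL stands in for _PAGE_VALID):

static inline void set_valid_sketch(unsigned long *pte)
{
        unsigned long old, new;
        do {
                old = __atomic_load_n(pte, __ATOMIC_RELAXED); /* ll.d t0, t1, 0   */
                new = old | 1UL;                              /* ori  t0, t0, ... */
        } while (!__atomic_compare_exchange_n(pte, &old, new, 1,
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)); /* sc.d + beq retry */
}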