Commit 05490626 authored by Ralf Baechle

MIPS: Move definitions for 32/64-bit agnostic inline assembler to new file.

Inspired by Markos Chandras' patch.  I just didn't want to pull bitops.h
into pgtable.h.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
References: https://patchwork.linux-mips.org/patch/11052/
parent 92e9953c
arch/mips/include/asm/bitops.h
@@ -19,25 +19,10 @@
 #include <asm/byteorder.h>	/* sigh ... */
 #include <asm/compiler.h>
 #include <asm/cpu-features.h>
+#include <asm/llsc.h>
 #include <asm/sgidefs.h>
 #include <asm/war.h>
-#if _MIPS_SZLONG == 32
-#define SZLONG_LOG 5
-#define SZLONG_MASK 31UL
-#define __LL "ll "
-#define __SC "sc "
-#define __INS "ins "
-#define __EXT "ext "
-#elif _MIPS_SZLONG == 64
-#define SZLONG_LOG 6
-#define SZLONG_MASK 63UL
-#define __LL "lld "
-#define __SC "scd "
-#define __INS "dins "
-#define __EXT "dext "
-#endif
 /*
  * These are the "slower" versions of the functions and are in bitops.c.
  * These functions call raw_local_irq_{save,restore}().
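For illustration only (not part of this commit): SZLONG_LOG and SZLONG_MASK encode the log2 and the bit mask of the width of an unsigned long, so bit operations can split a bit number into a word index and a bit offset without a 32/64-bit #ifdef. A minimal sketch of that pattern, assuming the macros above are visible; the helper name sketch_set_bit is made up and the update is deliberately non-atomic:

static inline void sketch_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	/* nr >> SZLONG_LOG selects the word, nr & SZLONG_MASK the bit in it:
	 * >> 5 / & 31 with 32-bit longs, >> 6 / & 63 with 64-bit longs. */
	volatile unsigned long *m = addr + (nr >> SZLONG_LOG);
	unsigned long mask = 1UL << (nr & SZLONG_MASK);

	*m |= mask;	/* non-atomic; the real bitops use ll/sc or irq masking */
}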
arch/mips/include/asm/llsc.h (new file)
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Macros for 32/64-bit neutral inline assembler
*/
#ifndef __ASM_LLSC_H
#define __ASM_LLSC_H
#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define __INS "ins "
#define __EXT "ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define __INS "dins "
#define __EXT "dext "
#endif
#endif /* __ASM_LLSC_H */
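For illustration only (not from this commit): because __LL and __SC expand to ll/sc with 32-bit longs and to lld/scd with 64-bit longs, a single inline-asm template can implement a load-linked/store-conditional retry loop for both ABIs. A hedged sketch of such a loop; the function name sketch_atomic_or and the plain "+m" memory constraint are illustrative choices, not kernel API:

static inline void sketch_atomic_or(unsigned long bits, volatile unsigned long *p)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"1:	" __LL "%0, %1\n"	/* load-linked *p */
	"	or	%0, %0, %2\n"	/* set the requested bits */
	"	" __SC "%0, %1\n"	/* store-conditional; %0 != 0 on success */
	"	beqz	%0, 1b\n"	/* retry if another CPU intervened */
	: "=&r" (tmp), "+m" (*p)
	: "r" (bits)
	: "memory");
}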
arch/mips/include/asm/pgtable.h
@@ -187,23 +187,16 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	 * For SMP, multiple CPUs can race, so we need to do
 	 * this atomically.
 	 */
-#ifdef CONFIG_64BIT
-#define LL_INSN "lld"
-#define SC_INSN "scd"
-#else /* CONFIG_32BIT */
-#define LL_INSN "ll"
-#define SC_INSN "sc"
-#endif
 	unsigned long page_global = _PAGE_GLOBAL;
 	unsigned long tmp;
 	__asm__ __volatile__ (
 	"	.set	push\n"
 	"	.set	noreorder\n"
-	"1:	" LL_INSN " %[tmp], %[buddy]\n"
+	"1:	" __LL " %[tmp], %[buddy]\n"
 	"	bnez	%[tmp], 2f\n"
 	"	or	%[tmp], %[tmp], %[global]\n"
-	"	" SC_INSN " %[tmp], %[buddy]\n"
+	"	" __SC " %[tmp], %[buddy]\n"
 	"	beqz	%[tmp], 1b\n"
 	"	nop\n"
 	"2:\n"
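For reference, the loop above (now spelled with the shared __LL/__SC macros) sets _PAGE_GLOBAL on the buddy PTE only while that PTE is still empty, retrying if the store-conditional fails. Ignoring the atomicity that the ll/sc pair provides, it behaves roughly like this sketch; sketch_buddy_global is a made-up name, and the real code stays in inline asm precisely because the update must be atomic:

static inline void sketch_buddy_global(pte_t *buddy)
{
	unsigned long val = pte_val(*buddy);

	/* "bnez %[tmp], 2f": give up if the buddy PTE is already populated. */
	if (!val)
		*buddy = __pte(val | _PAGE_GLOBAL);	/* the asm does or + sc, retried on sc failure */
}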