Commit cb0a9144 authored by Dave Hansen, committed by Ingo Molnar

x86/mm: Remove hard-coded ASID limit checks

First, it's nice to remove the magic numbers.

Second, PAGE_TABLE_ISOLATION is going to consume half of the available ASID
space.  The space is currently unused, but add a comment to spell out this
new restriction.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 50fb83a6
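As a sanity check on the arithmetic introduced in the hunk below, here is a minimal userspace sketch (not kernel code; the macro values are copied from the patch, and the assumption that PTI_CONSUMED_ASID_BITS becomes 1 once PAGE_TABLE_ISOLATION is enabled is mine, based on the commit message):

#include <stdio.h>

#define CR3_HW_ASID_BITS	12
#define PTI_CONSUMED_ASID_BITS	0	/* assumed to become 1 with PTI */
#define CR3_AVAIL_ASID_BITS	(CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
/* -1 for zero-based ASIDs, -1 more because hardware PCID 0 stays reserved */
#define MAX_ASID_AVAILABLE	((1 << CR3_AVAIL_ASID_BITS) - 2)

int main(void)
{
	/* (1 << 12) - 2 = 4094: matches the old hard-coded check */
	printf("MAX_ASID_AVAILABLE = %d\n", MAX_ASID_AVAILABLE);
	/* once PTI consumes a bit: (1 << 11) - 2 = 2046 */
	printf("with PTI bit taken: %d\n", (1 << (CR3_HW_ASID_BITS - 1)) - 2);
	return 0;
}

With all 12 bits available, (1 << 12) - 2 = 4094, which is exactly the magic number the patch replaces; once PTI claims a bit, the usable space halves to 2046.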
@@ -69,6 +69,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 	return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
+/* There are 12 bits of space for ASIDS in CR3 */
+#define CR3_HW_ASID_BITS		12
+/*
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
+ * user/kernel switches
+ */
+#define PTI_CONSUMED_ASID_BITS		0
+
+#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
+/*
+ * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
+ * for them being zero-based.  Another -1 is because ASID 0 is reserved for
+ * use by non-PCID-aware users.
+ */
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)
+
 /*
  * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID bits.
  * This serves two purposes.  It prevents a nasty situation in which
@@ -81,7 +97,7 @@ struct pgd_t;
 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
 {
 	if (static_cpu_has(X86_FEATURE_PCID)) {
-		VM_WARN_ON_ONCE(asid > 4094);
+		VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
 		return __sme_pa(pgd) | (asid + 1);
 	} else {
 		VM_WARN_ON_ONCE(asid != 0);
@@ -91,7 +107,7 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
 
 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 {
-	VM_WARN_ON_ONCE(asid > 4094);
+	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
 	return __sme_pa(pgd) | (asid + 1) | CR3_NOFLUSH;
 }
 
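For readers unfamiliar with the ASID+1 trick the comment above refers to, the following illustrative sketch mimics how build_cr3() and build_cr3_noflush() compose a CR3 value when PCID is in use. The physical address, the simplified CR3_NOFLUSH constant, and the build_cr3_sketch() helper are demonstration assumptions, not the kernel's actual definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified: bit 63 of CR3 asks the CPU not to flush the named PCID */
#define CR3_NOFLUSH	(1ULL << 63)

/* Hypothetical stand-in for build_cr3(); pgd_pa is a made-up,
 * page-aligned physical address rather than a real pgd_t pointer. */
static uint64_t build_cr3_sketch(uint64_t pgd_pa, uint16_t asid)
{
	/* ASID+1 in the low 12 PCID bits keeps hardware PCID 0 reserved */
	return pgd_pa | (uint64_t)(asid + 1);
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000;	/* hypothetical 4K-aligned PGD */

	printf("cr3         = %#llx\n",
	       (unsigned long long)build_cr3_sketch(pgd_pa, 5));
	printf("cr3 noflush = %#llx\n",
	       (unsigned long long)(build_cr3_sketch(pgd_pa, 5) | CR3_NOFLUSH));
	return 0;
}

Encoding asid + 1 means software ASID 0 maps to hardware PCID 1, leaving PCID 0 free for non-PCID-aware code that writes a bare page-table root into CR3.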