Commit 7235bb35 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32s: move CTX_TO_VSID() into mmu-hash.h

In order to reuse it in switch_mmu_context(), this patch
moves the CTX_TO_VSID() macro into asm/book3s/32/mmu-hash.h.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/26b36ef2939234a04b37baf6ffe50cba81f5d1b7.1622708530.git.christophe.leroy@csgroup.eu
parent 91bb3082
@@ -66,6 +66,16 @@ struct ppc_bat {
 #ifndef __ASSEMBLY__
+/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
+
 /*
  * Hardware Page Table Entry
  * Note that the xpn and x bitfields are used only by processors that
...
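
The skew constants make consecutive contexts and consecutive segments land far
apart in the 24-bit VSID space. A minimal user-space sketch of the mapping
(illustrative only, not part of this commit; the loop bounds are arbitrary
demo values):

	#include <stdio.h>

	/* Same macro as added to mmu-hash.h above: c is a context number,
	 * id is the effective segment ID (the top 4 bits of a 32-bit
	 * effective address). */
	#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)

	int main(void)
	{
		unsigned long ctx, esid;

		/* Adjacent contexts/segments yield well-spread 24-bit VSIDs. */
		for (ctx = 1; ctx <= 2; ctx++)
			for (esid = 0; esid < 3; esid++)
				printf("ctx %lu esid %lu -> vsid 0x%06lx\n",
				       ctx, esid, CTX_TO_VSID(ctx, esid));
		return 0;
	}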
@@ -353,9 +353,6 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
-/* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-
 int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
...
@@ -39,19 +39,6 @@
 #define LAST_CONTEXT	32767
 #define FIRST_CONTEXT	1
 
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- *
- *
- * CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- *				& 0xffffff)
- */
 static unsigned long next_mmu_context;
 static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
...
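
The comment removed here documents the older address-taking form of the
mapping; with the macro now shared via mmu-hash.h, the same result is
obtained by shifting the effective address down before the call. A hedged
sketch (vsid_for_ea() is a hypothetical helper, not kernel code):

	/* Hypothetical helper, not part of the kernel: derive the VSID for a
	 * full 32-bit effective address by passing its top 4 bits (the ESID)
	 * to the shared macro, mirroring the removed comment's formula. */
	static unsigned long vsid_for_ea(unsigned long ctx, unsigned long ea)
	{
		return CTX_TO_VSID(ctx, ea >> 28);
	}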