Commit a391263c authored by Will Deacon, committed by Russell King

ARM: 8203/1: mm: try to re-use old ASID assignments following a rollover

Rather than unconditionally allocating a fresh ASID to an mm from an
older generation, attempt to re-use the old assignment where possible.

This can bring performance benefits on systems where the ASID is used to
tag things other than the TLB (e.g. branch prediction resources).
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 2b94fe2a
@@ -184,22 +184,31 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
-	if (asid != 0 && is_reserved_asid(asid)) {
+	if (asid != 0) {
 		/*
-		 * Our current ASID was active during a rollover, we can
-		 * continue to use it and this was just a false alarm.
+		 * If our current ASID was active during a rollover, we
+		 * can continue to use it and this was just a false alarm.
 		 */
-		asid = generation | (asid & ~ASID_MASK);
-	} else {
+		if (is_reserved_asid(asid))
+			return generation | (asid & ~ASID_MASK);
+
 		/*
-		 * Allocate a free ASID. If we can't find one, take a
-		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes. We always count from ASID #1,
-		 * as we reserve ASID #0 to switch via TTBR0 and to
-		 * avoid speculative page table walks from hitting in
-		 * any partial walk caches, which could be populated
-		 * from overlapping level-1 descriptors used to map both
-		 * the module area and the userspace stack.
+		 * We had a valid ASID in a previous life, so try to re-use
+		 * it if possible.
 		 */
-		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
-		if (asid == NUM_USER_ASIDS) {
+		asid &= ~ASID_MASK;
+		if (!__test_and_set_bit(asid, asid_map))
+			goto bump_gen;
+	}
+
+	/*
+	 * Allocate a free ASID. If we can't find one, take a note of the
+	 * currently active ASIDs and mark the TLBs as requiring flushes.
+	 * We always count from ASID #1, as we reserve ASID #0 to switch
+	 * via TTBR0 and to avoid speculative page table walks from hitting
+	 * in any partial walk caches, which could be populated from
+	 * overlapping level-1 descriptors used to map both the module
+	 * area and the userspace stack.
+	 */
+	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+	if (asid == NUM_USER_ASIDS) {
@@ -208,12 +217,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
-			flush_context(cpu);
-			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
-		}
-		__set_bit(asid, asid_map);
-		cur_idx = asid;
-		asid |= generation;
-		cpumask_clear(mm_cpumask(mm));
-	}
+		flush_context(cpu);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+	}
 
+	__set_bit(asid, asid_map);
+	cur_idx = asid;
+
+bump_gen:
+	asid |= generation;
+	cpumask_clear(mm_cpumask(mm));
 	return asid;
 }
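
For readers who want to try the policy outside the kernel, the stand-alone C sketch below models the new allocation order: on a generation mismatch it first tries to re-claim the ASID number the mm held previously, and only falls back to a linear bitmap search (and a rollover once the space is exhausted) when that slot is already taken. It is an illustration under simplified assumptions — NUM_ASIDS, GENERATION_INC, the plain byte-map and the printf calls are inventions of the example, and the reserved-ASID fast path plus the flush_context()/TLB bookkeeping of the real code are deliberately left out.

/*
 * Stand-alone model of the allocation policy above (not kernel code).
 * NUM_ASIDS, GENERATION_INC and the byte-map are simplified stand-ins
 * for the kernel's asid_map/ASID_MASK machinery; the reserved-ASID
 * fast path and the TLB-flush bookkeeping are omitted.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_ASIDS	256u
#define ASID_MASK	(~(uint64_t)(NUM_ASIDS - 1))
#define GENERATION_INC	((uint64_t)NUM_ASIDS)

static uint64_t generation = GENERATION_INC;	/* generation lives above the ASID bits */
static uint8_t asid_map[NUM_ASIDS];		/* non-zero = ASID taken in this generation */
static uint32_t cur_idx = 1;			/* ASID #0 stays reserved */

/* Called when a context id belongs to an older generation. */
static uint64_t new_context(uint64_t old_id)
{
	uint64_t asid = old_id & ~ASID_MASK;

	/*
	 * Re-use path added by this patch: if the ASID number we held in
	 * the previous generation is still free, claim it and only update
	 * the generation bits.
	 */
	if (old_id != 0 && !asid_map[asid]) {
		asid_map[asid] = 1;
		return generation | asid;
	}

	/* Otherwise allocate the next free ASID, as before. */
	for (uint32_t i = cur_idx; i < NUM_ASIDS; i++) {
		if (!asid_map[i]) {
			asid_map[i] = 1;
			cur_idx = i;
			return generation | i;
		}
	}

	/* Exhausted: roll over to a new generation and restart from ASID #1. */
	generation += GENERATION_INC;
	memset(asid_map, 0, sizeof(asid_map));
	cur_idx = 1;
	asid_map[cur_idx] = 1;
	return generation | cur_idx;
}

int main(void)
{
	uint64_t id = new_context(0);			/* fresh mm: gets ASID #1 */
	printf("initial id:        %#llx\n", (unsigned long long)id);

	generation += GENERATION_INC;			/* simulate a rollover... */
	memset(asid_map, 0, sizeof(asid_map));		/* ...which frees every ASID */

	id = new_context(id);				/* same ASID number, new generation */
	printf("id after rollover: %#llx\n", (unsigned long long)id);
	return 0;
}

Built with any C compiler, the sketch prints an id ending in ASID #1 before the simulated rollover and the same ASID number with bumped generation bits afterwards, which is the re-use behaviour the commit message describes.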