Commit 5f464b33 authored by Peter Zijlstra, committed by Thomas Gleixner

x86/mm/cpa: Move CLFLUSH test into cpa_flush_range()

Rather than guarding all cpa_flush_range() uses with a CLFLUSH test,
put it inside.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Cc: Bin Yang <bin.yang@intel.com>
Cc: Mark Gross <mark.gross@intel.com>
Link: https://lkml.kernel.org/r/20180919085948.036195503@infradead.org
parent a7295fd5
@@ -293,6 +293,11 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 	WARN_ON(PAGE_ALIGN(start) != start);
 
+	if (!static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		cpa_flush_all(cache);
+		return;
+	}
+
 	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
 
 	if (!cache)
@@ -2078,10 +2083,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	if (static_cpu_has(X86_FEATURE_CLFLUSH))
-		cpa_flush_range(start, numpages, 1);
-	else
-		cpa_flush_all(1);
+	cpa_flush_range(start, numpages, 1);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2092,10 +2094,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	 * in case TLB flushing gets optimized in the cpa_flush_range()
 	 * path use the same logic as above.
 	 */
-	if (static_cpu_has(X86_FEATURE_CLFLUSH))
-		cpa_flush_range(start, numpages, 0);
-	else
-		cpa_flush_all(0);
+	cpa_flush_range(start, numpages, 0);
 
 	return ret;
 }
...
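
For reference, a minimal sketch of cpa_flush_range() as it reads after this change, reconstructed from the hunks above. The clflush loop at the end is pre-existing code from the same file, paraphrased here for context only; it is not part of this diff, and the helpers used (cpa_flush_all(), lookup_address(), clflush_cache_range()) are assumed to behave as in arch/x86/mm at the time of this commit.

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
	WARN_ON(PAGE_ALIGN(start) != start);

	/* The CLFLUSH test now lives here instead of at every call site. */
	if (!static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);

	if (!cache)
		return;

	/*
	 * clflush is cache-coherent across CPUs, so flushing on one CPU
	 * suffices; only flush addresses that are actually mapped, since
	 * an unmapped page cannot be cached.
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

With the test folded in, callers such as __set_memory_enc_dec() can invoke cpa_flush_range() unconditionally, as the second and third hunks show.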