Commit ead9e430 authored by Dave Martin, committed by Marc Zyngier

arm64/sve: In-kernel vector length availability query interface

KVM will need to interrogate the set of SVE vector lengths
available on the system.

This patch exposes the relevant bits to the kernel, along with a
sve_vq_available() helper to check whether a particular vector
length is supported.

__vq_to_bit() and __bit_to_vq() are not intended for use outside
these functions: now that these are exposed outside fpsimd.c, they
are prefixed with __ in order to provide an extra hint that they
are not intended for general-purpose use.
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 8e3c54c8
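
For context, a caller such as KVM could use the new query interface along these lines. This is a minimal sketch only, not part of the commit: report_supported_vls() is a hypothetical function, and it assumes the usual SVE_VQ_* and sve_vl_from_vq() macros are in scope via <asm/fpsimd.h>.

#include <linux/printk.h>
#include <asm/fpsimd.h>

/* Hypothetical example: log every SVE vector length this system supports. */
static void report_supported_vls(void)
{
	unsigned int vq;

	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (sve_vq_available(vq))
			pr_info("SVE: vector length %u bytes available\n",
				sve_vl_from_vq(vq));
}
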
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -24,10 +24,13 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitmap.h>
 #include <linux/build_bug.h>
+#include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/stddef.h>
+#include <linux/types.h>
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
@@ -89,6 +92,32 @@ extern u64 read_zcr_features(void);
 
 extern int __ro_after_init sve_max_vl;
 extern int __ro_after_init sve_max_virtualisable_vl;
+/* Set of available vector lengths, as vq_to_bit(vq): */
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa).  This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+static inline unsigned int __vq_to_bit(unsigned int vq)
+{
+	return SVE_VQ_MAX - vq;
+}
+
+static inline unsigned int __bit_to_vq(unsigned int bit)
+{
+	if (WARN_ON(bit >= SVE_VQ_MAX))
+		bit = SVE_VQ_MAX - 1;
+
+	return SVE_VQ_MAX - bit;
+}
+
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+static inline bool sve_vq_available(unsigned int vq)
+{
+	return test_bit(__vq_to_bit(vq), sve_vq_map);
+}
 
 #ifdef CONFIG_ARM64_SVE
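
The bit-index mapping above is what makes a maximum-not-exceeding search cheap: __vq_to_bit() stores VQ vq at bit SVE_VQ_MAX - vq, so smaller bit indices correspond to larger vector lengths, and a find_next_bit() search starting at __vq_to_bit(vq) returns the largest supported VQ that does not exceed vq. A minimal sketch of that pattern follows; max_supported_vq() is hypothetical and simply mirrors the idiom used by find_supported_vector_length() below.

/* Hypothetical example: largest supported VQ not exceeding @vq. */
static unsigned int max_supported_vq(unsigned int vq)
{
	unsigned int bit;

	/* Bits at index >= __vq_to_bit(vq) correspond to VQs <= vq */
	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, __vq_to_bit(vq));

	/* __bit_to_vq() WARNs and clamps if no bit was found */
	return __bit_to_vq(bit);
}
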
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -136,7 +136,7 @@ static int sve_default_vl = -1;
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
 /* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
 /* Set of vector lengths present on at least one cpu: */
 static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 static void __percpu *efi_sve_state;
@@ -269,25 +269,6 @@ void fpsimd_save(void)
 	}
 }
 
-/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa).  This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-static unsigned int vq_to_bit(unsigned int vq)
-{
-	return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
-	if (WARN_ON(bit >= SVE_VQ_MAX))
-		bit = SVE_VQ_MAX - 1;
-
-	return SVE_VQ_MAX - bit;
-}
-
 /*
  * All vector length selection from userspace comes through here.
  * We're on a slow path, so some sanity-checks are included.
@@ -309,8 +290,8 @@ static unsigned int find_supported_vector_length(unsigned int vl)
 		vl = max_vl;
 
 	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
-			    vq_to_bit(sve_vq_from_vl(vl)));
-	return sve_vl_from_vq(bit_to_vq(bit));
+			    __vq_to_bit(sve_vq_from_vl(vl)));
+	return sve_vl_from_vq(__bit_to_vq(bit));
 }
 
 #ifdef CONFIG_SYSCTL
@@ -648,7 +629,7 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
 		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
 		vl = sve_get_vl();
 		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
-		set_bit(vq_to_bit(vq), map);
+		set_bit(__vq_to_bit(vq), map);
 	}
 }
@@ -717,7 +698,7 @@ int sve_verify_vq_map(void)
 		 * Mismatches above sve_max_virtualisable_vl are fine, since
 		 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
 		 */
-		if (sve_vl_from_vq(bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+		if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
 			pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
 				smp_processor_id());
 			return -EINVAL;
@@ -801,8 +782,8 @@ void __init sve_setup(void)
 	 * so sve_vq_map must have at least SVE_VQ_MIN set.
 	 * If something went wrong, at least try to patch it up:
 	 */
-	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
-		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
 
 	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
 	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -831,7 +812,7 @@ void __init sve_setup(void)
 		/* No virtualisable VLs? This is architecturally forbidden. */
 		sve_max_virtualisable_vl = SVE_VQ_MIN;
 	else /* b + 1 < SVE_VQ_MAX */
-		sve_max_virtualisable_vl = sve_vl_from_vq(bit_to_vq(b + 1));
+		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
 
 	if (sve_max_virtualisable_vl > sve_max_vl)
 		sve_max_virtualisable_vl = sve_max_vl;