Commit b5bc00ff, authored by Mark Brown and committed by Will Deacon

arm64/sve: Put system wide vector length information into structs

With the introduction of SME we will have a second vector length in the
system, enumerated and configured in a very similar fashion to the
existing SVE vector length.  While there are a few differences in how
things are handled this is a relatively small portion of the overall
code so in order to avoid code duplication we factor out the common
vector length handling so it can be shared between the two.

We create two structs, one vl_info for the static hardware properties
and one vl_config for the runtime configuration, with an array
instantiated for each and update all the users to reference these. Some
accessor functions are provided where helpful for readability, and the
write to set the vector length is put into a function since the system
register being updated needs to be chosen at compile time.

This is a mostly mechanical replacement, further work will be required
to actually make things generic, ensuring that we handle those places
where there are differences properly.
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20211019172247.3045838-8-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 0423eedc
...@@ -77,10 +77,6 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); ...@@ -77,10 +77,6 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void); extern u64 read_zcr_features(void);
extern int __ro_after_init sve_max_vl;
extern int __ro_after_init sve_max_virtualisable_vl;
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
/* /*
* Helpers to translate bit indices in sve_vq_map to VQ values (and * Helpers to translate bit indices in sve_vq_map to VQ values (and
* vice versa). This allows find_next_bit() to be used to find the * vice versa). This allows find_next_bit() to be used to find the
...@@ -96,11 +92,27 @@ static inline unsigned int __bit_to_vq(unsigned int bit) ...@@ -96,11 +92,27 @@ static inline unsigned int __bit_to_vq(unsigned int bit)
return SVE_VQ_MAX - bit; return SVE_VQ_MAX - bit;
} }
/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool sve_vq_available(unsigned int vq) struct vl_info {
{ enum vec_type type;
return test_bit(__vq_to_bit(vq), sve_vq_map); const char *name; /* For display purposes */
}
/* Minimum supported vector length across all CPUs */
int min_vl;
/* Maximum supported vector length across all CPUs */
int max_vl;
int max_virtualisable_vl;
/*
* Set of available vector lengths,
* where length vq encoded as bit __vq_to_bit(vq):
*/
DECLARE_BITMAP(vq_map, SVE_VQ_MAX);
/* Set of vector lengths present on at least one cpu: */
DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
};
#ifdef CONFIG_ARM64_SVE #ifdef CONFIG_ARM64_SVE
...@@ -139,11 +151,63 @@ static inline void sve_user_enable(void) ...@@ -139,11 +151,63 @@ static inline void sve_user_enable(void)
* Probing and setup functions. * Probing and setup functions.
* Calls to these functions must be serialised with one another. * Calls to these functions must be serialised with one another.
*/ */
extern void __init sve_init_vq_map(void); enum vec_type;
extern void sve_update_vq_map(void);
extern int sve_verify_vq_map(void); extern void __init vec_init_vq_map(enum vec_type type);
extern void vec_update_vq_map(enum vec_type type);
extern int vec_verify_vq_map(enum vec_type type);
extern void __init sve_setup(void); extern void __init sve_setup(void);
extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
/*
 * Program the vector length @val for vector type @type into the
 * corresponding EL1 control register.  @val is OR'd into the register
 * after the LEN field is cleared, so callers pass a LEN field value
 * rather than a byte count.  A switch is used because the system
 * register being updated must be chosen at compile time.
 */
static inline void write_vl(enum vec_type type, u64 val)
{
	u64 tmp;

	switch (type) {
#ifdef CONFIG_ARM64_SVE
	case ARM64_VEC_SVE:
		/* Read-modify-write: preserve all non-LEN bits of ZCR_EL1 */
		tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_ZCR_EL1);
		break;
#endif
	default:
		/* Unknown vector type: warn once, write nothing */
		WARN_ON_ONCE(1);
		break;
	}
}
static inline int vec_max_vl(enum vec_type type)
{
return vl_info[type].max_vl;
}
static inline int vec_max_virtualisable_vl(enum vec_type type)
{
return vl_info[type].max_virtualisable_vl;
}
static inline int sve_max_vl(void)
{
return vec_max_vl(ARM64_VEC_SVE);
}
static inline int sve_max_virtualisable_vl(void)
{
return vec_max_virtualisable_vl(ARM64_VEC_SVE);
}
/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool vq_available(enum vec_type type, unsigned int vq)
{
	unsigned int bit = __vq_to_bit(vq);

	return test_bit(bit, vl_info[type].vq_map);
}
static inline bool sve_vq_available(unsigned int vq)
{
return vq_available(ARM64_VEC_SVE, vq);
}
#else /* ! CONFIG_ARM64_SVE */ #else /* ! CONFIG_ARM64_SVE */
static inline void sve_alloc(struct task_struct *task) { } static inline void sve_alloc(struct task_struct *task) { }
...@@ -161,14 +225,21 @@ static inline int sve_get_current_vl(void) ...@@ -161,14 +225,21 @@ static inline int sve_get_current_vl(void)
return -EINVAL; return -EINVAL;
} }
/* SVE compiled out: no vector length to report. */
static inline int sve_max_vl(void) { return -EINVAL; }
/* SVE compiled out: no vector length is ever available. */
static inline bool sve_vq_available(unsigned int vq)
{
	return false;
}
static inline void sve_user_disable(void) { BUILD_BUG(); } static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); } static inline void sve_user_enable(void) { BUILD_BUG(); }
#define sve_cond_update_zcr_vq(val, reg) do { } while (0) #define sve_cond_update_zcr_vq(val, reg) do { } while (0)
static inline void sve_init_vq_map(void) { } static inline void vec_init_vq_map(enum vec_type t) { }
static inline void sve_update_vq_map(void) { } static inline void vec_update_vq_map(enum vec_type t) { }
static inline int sve_verify_vq_map(void) { return 0; } static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { } static inline void sve_setup(void) { }
#endif /* ! CONFIG_ARM64_SVE */ #endif /* ! CONFIG_ARM64_SVE */
......
...@@ -115,6 +115,11 @@ struct debug_info { ...@@ -115,6 +115,11 @@ struct debug_info {
#endif #endif
}; };
/*
 * Vector extension types known to the vector length management code.
 * Used to index per-type arrays such as vl_info[]; ARM64_VEC_MAX is
 * the number of entries, not a real vector type.
 */
enum vec_type {
	ARM64_VEC_SVE = 0,
	ARM64_VEC_MAX,
};
struct cpu_context { struct cpu_context {
unsigned long x19; unsigned long x19;
unsigned long x20; unsigned long x20;
......
...@@ -941,7 +941,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) ...@@ -941,7 +941,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
sve_init_vq_map(); vec_init_vq_map(ARM64_VEC_SVE);
} }
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
...@@ -1175,7 +1175,7 @@ void update_cpu_features(int cpu, ...@@ -1175,7 +1175,7 @@ void update_cpu_features(int cpu,
/* Probe vector lengths, unless we already gave up on SVE */ /* Probe vector lengths, unless we already gave up on SVE */
if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
!system_capabilities_finalized()) !system_capabilities_finalized())
sve_update_vq_map(); vec_update_vq_map(ARM64_VEC_SVE);
} }
/* /*
...@@ -2739,7 +2739,7 @@ static void verify_sve_features(void) ...@@ -2739,7 +2739,7 @@ static void verify_sve_features(void)
unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK; unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
unsigned int len = zcr & ZCR_ELx_LEN_MASK; unsigned int len = zcr & ZCR_ELx_LEN_MASK;
if (len < safe_len || sve_verify_vq_map()) { if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
pr_crit("CPU%d: SVE: vector length support mismatch\n", pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id()); smp_processor_id());
cpu_die_early(); cpu_die_early();
......
This diff is collapsed.
...@@ -728,7 +728,7 @@ static void sve_init_header_from_task(struct user_sve_header *header, ...@@ -728,7 +728,7 @@ static void sve_init_header_from_task(struct user_sve_header *header,
header->vl = task_get_sve_vl(target); header->vl = task_get_sve_vl(target);
vq = sve_vq_from_vl(header->vl); vq = sve_vq_from_vl(header->vl);
header->max_vl = sve_max_vl; header->max_vl = sve_max_vl();
header->size = SVE_PT_SIZE(vq, header->flags); header->size = SVE_PT_SIZE(vq, header->flags);
header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
SVE_PT_REGS_SVE); SVE_PT_REGS_SVE);
......
...@@ -594,7 +594,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, ...@@ -594,7 +594,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
unsigned int vq = 0; unsigned int vq = 0;
if (add_all || test_thread_flag(TIF_SVE)) { if (add_all || test_thread_flag(TIF_SVE)) {
int vl = sve_max_vl; int vl = sve_max_vl();
if (!add_all) if (!add_all)
vl = task_get_sve_vl(current); vl = task_get_sve_vl(current);
......
...@@ -46,7 +46,7 @@ unsigned int kvm_sve_max_vl; ...@@ -46,7 +46,7 @@ unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void) int kvm_arm_init_sve(void)
{ {
if (system_supports_sve()) { if (system_supports_sve()) {
kvm_sve_max_vl = sve_max_virtualisable_vl; kvm_sve_max_vl = sve_max_virtualisable_vl();
/* /*
* The get_sve_reg()/set_sve_reg() ioctl interface will need * The get_sve_reg()/set_sve_reg() ioctl interface will need
...@@ -61,7 +61,7 @@ int kvm_arm_init_sve(void) ...@@ -61,7 +61,7 @@ int kvm_arm_init_sve(void)
* Don't even try to make use of vector lengths that * Don't even try to make use of vector lengths that
* aren't available on all CPUs, for now: * aren't available on all CPUs, for now:
*/ */
if (kvm_sve_max_vl < sve_max_vl) if (kvm_sve_max_vl < sve_max_vl())
pr_warn("KVM: SVE vector length for guests limited to %u bytes\n", pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
kvm_sve_max_vl); kvm_sve_max_vl);
} }
...@@ -102,7 +102,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) ...@@ -102,7 +102,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
* kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
* set_sve_vls(). Double-check here just to be sure: * set_sve_vls(). Double-check here just to be sure:
*/ */
if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl || if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
vl > SVE_VL_ARCH_MAX)) vl > SVE_VL_ARCH_MAX))
return -EIO; return -EIO;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment