Commit 655ee557 authored by Will Deacon

Merge branch 'for-next/sve' into for-next/core

* for-next/sve:
  arm64/sve: Fix warnings when SVE is disabled
  arm64/sve: Add stub for sve_max_virtualisable_vl()
  arm64/sve: Track vector lengths for tasks in an array
  arm64/sve: Explicitly load vector length when restoring SVE state
  arm64/sve: Put system wide vector length information into structs
  arm64/sve: Use accessor functions for vector lengths in thread_struct
  arm64/sve: Rename find_supported_vector_length()
  arm64/sve: Make access to FFR optional
  arm64/sve: Make sve_state_size() static
  arm64/sve: Remove sve_load_from_fpsimd_state()
  arm64/fp: Reindent fpsimd_save()
parents 3d9c8315 04ee53a5
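
Most of the churn in the diff below comes from replacing direct accesses to thread.sve_vl with accessor functions indexed by a new enum vec_type, so that the same bookkeeping can later serve additional vector types. A rough, userspace-compilable sketch of that accessor pattern (not kernel code; the struct is trimmed to the fields this series touches, and main() exists only to exercise it):

#include <assert.h>
#include <stdio.h>

enum vec_type {
	ARM64_VEC_SVE = 0,
	ARM64_VEC_MAX,
};

/* Cut-down stand-in for the kernel's struct thread_struct. */
struct thread_struct {
	unsigned int vl[ARM64_VEC_MAX];		/* vector length */
	unsigned int vl_onexec[ARM64_VEC_MAX];	/* vl after next exec */
};

/* Accessors as introduced in processor.h by this series. */
static inline unsigned int thread_get_vl(struct thread_struct *thread,
					 enum vec_type type)
{
	return thread->vl[type];
}

static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
{
	return thread_get_vl(thread, ARM64_VEC_SVE);
}

int main(void)
{
	/* Pretend the task was given a 256-bit (32-byte) SVE vector length. */
	struct thread_struct t = { .vl = { [ARM64_VEC_SVE] = 32 } };

	/* Callers that used to read thread->sve_vl now go through the
	 * accessor, as sve_pffr() does in the first hunk below. */
	assert(thread_get_sve_vl(&t) == 32);
	printf("SVE VL: %u bytes\n", thread_get_sve_vl(&t));
	return 0;
}

The task-level helpers (task_get_sve_vl(), task_set_sve_vl() and their _onexec variants) wrap non-inline task_get_vl()/task_set_vl() declarations; their definitions are presumably in the fpsimd.c change, whose diff is collapsed further down.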
@@ -62,15 +62,13 @@ static inline size_t sve_ffr_offset(int vl)
 static inline void *sve_pffr(struct thread_struct *thread)
 {
-	return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
+	return (char *)thread->sve_state + sve_ffr_offset(thread_get_sve_vl(thread));
 }
 
-extern void sve_save_state(void *state, u32 *pfpsr);
+extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
-			   unsigned long vq_minus_1);
-extern void sve_flush_live(unsigned long vq_minus_1);
-extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
-				       unsigned long vq_minus_1);
+			   int restore_ffr);
+extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
@@ -79,10 +77,6 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern u64 read_zcr_features(void);
 
-extern int __ro_after_init sve_max_vl;
-extern int __ro_after_init sve_max_virtualisable_vl;
-extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
-
 /*
  * Helpers to translate bit indices in sve_vq_map to VQ values (and
  * vice versa). This allows find_next_bit() to be used to find the
@@ -98,15 +92,29 @@ static inline unsigned int __bit_to_vq(unsigned int bit)
 	return SVE_VQ_MAX - bit;
 }
 
-/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
-static inline bool sve_vq_available(unsigned int vq)
-{
-	return test_bit(__vq_to_bit(vq), sve_vq_map);
-}
-
-#ifdef CONFIG_ARM64_SVE
+struct vl_info {
+	enum vec_type type;
+	const char *name;		/* For display purposes */
 
-extern size_t sve_state_size(struct task_struct const *task);
+	/* Minimum supported vector length across all CPUs */
+	int min_vl;
+
+	/* Maximum supported vector length across all CPUs */
+	int max_vl;
+	int max_virtualisable_vl;
+
+	/*
+	 * Set of available vector lengths,
+	 * where length vq encoded as bit __vq_to_bit(vq):
+	 */
+	DECLARE_BITMAP(vq_map, SVE_VQ_MAX);
+
+	/* Set of vector lengths present on at least one cpu: */
+	DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
+};
+
+#ifdef CONFIG_ARM64_SVE
 
 extern void sve_alloc(struct task_struct *task);
 extern void fpsimd_release_task(struct task_struct *task);
@@ -143,11 +151,63 @@ static inline void sve_user_enable(void)
  * Probing and setup functions.
  * Calls to these functions must be serialised with one another.
  */
-extern void __init sve_init_vq_map(void);
-extern void sve_update_vq_map(void);
-extern int sve_verify_vq_map(void);
+enum vec_type;
+
+extern void __init vec_init_vq_map(enum vec_type type);
+extern void vec_update_vq_map(enum vec_type type);
+extern int vec_verify_vq_map(enum vec_type type);
 extern void __init sve_setup(void);
 
+extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
+
+static inline void write_vl(enum vec_type type, u64 val)
+{
+	u64 tmp;
+
+	switch (type) {
+#ifdef CONFIG_ARM64_SVE
+	case ARM64_VEC_SVE:
+		tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
+		write_sysreg_s(tmp | val, SYS_ZCR_EL1);
+		break;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+}
+
+static inline int vec_max_vl(enum vec_type type)
+{
+	return vl_info[type].max_vl;
+}
+
+static inline int vec_max_virtualisable_vl(enum vec_type type)
+{
+	return vl_info[type].max_virtualisable_vl;
+}
+
+static inline int sve_max_vl(void)
+{
+	return vec_max_vl(ARM64_VEC_SVE);
+}
+
+static inline int sve_max_virtualisable_vl(void)
+{
+	return vec_max_virtualisable_vl(ARM64_VEC_SVE);
+}
+
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+static inline bool vq_available(enum vec_type type, unsigned int vq)
+{
+	return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
+}
+
+static inline bool sve_vq_available(unsigned int vq)
+{
+	return vq_available(ARM64_VEC_SVE, vq);
+}
+
 #else /* ! CONFIG_ARM64_SVE */
 
 static inline void sve_alloc(struct task_struct *task) { }
@@ -155,6 +215,11 @@ static inline void fpsimd_release_task(struct task_struct *task) { }
 static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
 static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
 
+static inline int sve_max_virtualisable_vl(void)
+{
+	return 0;
+}
+
 static inline int sve_set_current_vl(unsigned long arg)
 {
 	return -EINVAL;
@@ -165,14 +230,21 @@ static inline int sve_get_current_vl(void)
 	return -EINVAL;
 }
 
+static inline int sve_max_vl(void)
+{
+	return -EINVAL;
+}
+
+static inline bool sve_vq_available(unsigned int vq) { return false; }
+
 static inline void sve_user_disable(void) { BUILD_BUG(); }
 static inline void sve_user_enable(void) { BUILD_BUG(); }
 
 #define sve_cond_update_zcr_vq(val, reg) do { } while (0)
 
-static inline void sve_init_vq_map(void) { }
-static inline void sve_update_vq_map(void) { }
-static inline int sve_verify_vq_map(void) { return 0; }
+static inline void vec_init_vq_map(enum vec_type t) { }
+static inline void vec_update_vq_map(enum vec_type t) { }
+static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
 static inline void sve_setup(void) { }
 
 #endif /* ! CONFIG_ARM64_SVE */
......
@@ -217,28 +217,36 @@
 .macro sve_flush_z
  _for n, 0, 31, _sve_flush_z	\n
 .endm
-.macro sve_flush_p_ffr
+.macro sve_flush_p
  _for n, 0, 15, _sve_pfalse	\n
+.endm
+
+.macro sve_flush_ffr
 	_sve_wrffr	0
 .endm
 
-.macro sve_save nxbase, xpfpsr, nxtmp
+.macro sve_save nxbase, xpfpsr, save_ffr, nxtmp
  _for n, 0, 31, _sve_str_v	\n, \nxbase, \n - 34
  _for n, 0, 15, _sve_str_p	\n, \nxbase, \n - 16
+	cbz	\save_ffr, 921f
 	_sve_rdffr	0
 	_sve_str_p	0, \nxbase
 	_sve_ldr_p	0, \nxbase, -16
+	b	922f
+921:
+	str	xzr, [x\nxbase]		// Zero out FFR
+922:
 	mrs	x\nxtmp, fpsr
 	str	w\nxtmp, [\xpfpsr]
 	mrs	x\nxtmp, fpcr
 	str	w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro __sve_load nxbase, xpfpsr, nxtmp
+.macro sve_load nxbase, xpfpsr, restore_ffr, nxtmp
  _for n, 0, 31, _sve_ldr_v	\n, \nxbase, \n - 34
+	cbz	\restore_ffr, 921f
 	_sve_ldr_p	0, \nxbase
 	_sve_wrffr	0
+921:
  _for n, 0, 15, _sve_ldr_p	\n, \nxbase, \n - 16
 
 	ldr	w\nxtmp, [\xpfpsr]
@@ -246,8 +254,3 @@
 	ldr	w\nxtmp, [\xpfpsr, #4]
 	msr	fpcr, x\nxtmp
 .endm
-
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
-	sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
-	__sve_load	\nxbase, \xpfpsr, \nxtmp
-.endm
@@ -115,6 +115,11 @@ struct debug_info {
 #endif
 };
 
+enum vec_type {
+	ARM64_VEC_SVE = 0,
+	ARM64_VEC_MAX,
+};
+
 struct cpu_context {
 	unsigned long x19;
 	unsigned long x20;
@@ -147,8 +152,8 @@ struct thread_struct {
 	unsigned int	fpsimd_cpu;
 	void		*sve_state;	/* SVE registers, if any */
-	unsigned int	sve_vl;		/* SVE vector length */
-	unsigned int	sve_vl_onexec;	/* SVE vl after next exec */
+	unsigned int	vl[ARM64_VEC_MAX];	/* vector length */
+	unsigned int	vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
 	unsigned long	fault_address;	/* fault info */
 	unsigned long	fault_code;	/* ESR_EL1 value */
 	struct debug_info	debug;	/* debugging */
@@ -164,6 +169,46 @@ struct thread_struct {
 	u64		sctlr_user;
 };
 
+static inline unsigned int thread_get_vl(struct thread_struct *thread,
+					 enum vec_type type)
+{
+	return thread->vl[type];
+}
+
+static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
+{
+	return thread_get_vl(thread, ARM64_VEC_SVE);
+}
+
+unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
+void task_set_vl(struct task_struct *task, enum vec_type type,
+		 unsigned long vl);
+void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
+			unsigned long vl);
+unsigned int task_get_vl_onexec(const struct task_struct *task,
+				enum vec_type type);
+
+static inline unsigned int task_get_sve_vl(const struct task_struct *task)
+{
+	return task_get_vl(task, ARM64_VEC_SVE);
+}
+
+static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
+{
+	task_set_vl(task, ARM64_VEC_SVE, vl);
+}
+
+static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
+{
+	return task_get_vl_onexec(task, ARM64_VEC_SVE);
+}
+
+static inline void task_set_sve_vl_onexec(struct task_struct *task,
+					  unsigned long vl)
+{
+	task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
+}
+
 #define SCTLR_USER_MASK \
 	(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
 	 SCTLR_EL1_TCF0_MASK)
......
@@ -78,7 +78,7 @@ int arch_dup_task_struct(struct task_struct *dst,
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* 32bit process */
 #define TIF_SVE			23	/* Scalable Vector Extension in use */
-#define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
+#define TIF_SVE_VL_INHERIT	24	/* Inherit SVE vl_onexec across exec */
 #define TIF_SSBD		25	/* Wants SSB mitigation */
 #define TIF_TAGGED_ADDR		26	/* Allow tagged user addresses */
......
@@ -941,7 +941,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
-		sve_init_vq_map();
+		vec_init_vq_map(ARM64_VEC_SVE);
 	}
 
 	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
@@ -1175,7 +1175,7 @@ void update_cpu_features(int cpu,
 	/* Probe vector lengths, unless we already gave up on SVE */
 	if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
 	    !system_capabilities_finalized())
-		sve_update_vq_map();
+		vec_update_vq_map(ARM64_VEC_SVE);
 }
 
 /*
@@ -2760,7 +2760,7 @@ static void verify_sve_features(void)
 	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
 	unsigned int len = zcr & ZCR_ELx_LEN_MASK;
 
-	if (len < safe_len || sve_verify_vq_map()) {
+	if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
 		pr_crit("CPU%d: SVE: vector length support mismatch\n",
 			smp_processor_id());
 		cpu_die_early();
......
@@ -38,9 +38,10 @@ SYM_FUNC_END(fpsimd_load_state)
  *
  * x0 - pointer to buffer for state
  * x1 - pointer to storage for FPSR
+ * x2 - Save FFR if non-zero
  */
 SYM_FUNC_START(sve_save_state)
-	sve_save 0, x1, 2
+	sve_save 0, x1, x2, 3
 	ret
 SYM_FUNC_END(sve_save_state)
@@ -49,10 +50,10 @@ SYM_FUNC_END(sve_save_state)
  *
  * x0 - pointer to buffer for state
  * x1 - pointer to storage for FPSR
- * x2 - VQ-1
+ * x2 - Restore FFR if non-zero
  */
 SYM_FUNC_START(sve_load_state)
-	sve_load 0, x1, x2, 3, x4
+	sve_load 0, x1, x2, 4
 	ret
 SYM_FUNC_END(sve_load_state)
@@ -66,35 +67,22 @@ SYM_FUNC_START(sve_set_vq)
 	ret
 SYM_FUNC_END(sve_set_vq)
 
-/*
- * Load SVE state from FPSIMD state.
- *
- * x0 = pointer to struct fpsimd_state
- * x1 = VQ - 1
- *
- * Each SVE vector will be loaded with the first 128-bits taken from FPSIMD
- * and the rest zeroed. All the other SVE registers will be zeroed.
- */
-SYM_FUNC_START(sve_load_from_fpsimd_state)
-	sve_load_vq	x1, x2, x3
-	fpsimd_restore	x0, 8
-	sve_flush_p_ffr
-	ret
-SYM_FUNC_END(sve_load_from_fpsimd_state)
-
 /*
  * Zero all SVE registers but the first 128-bits of each vector
  *
  * VQ must already be configured by caller, any further updates of VQ
  * will need to ensure that the register state remains valid.
  *
- * x0 = VQ - 1
+ * x0 = include FFR?
+ * x1 = VQ - 1
  */
 SYM_FUNC_START(sve_flush_live)
-	cbz	x0, 1f	// A VQ-1 of 0 is 128 bits so no extra Z state
+	cbz	x1, 1f	// A VQ-1 of 0 is 128 bits so no extra Z state
 	sve_flush_z
-1:	sve_flush_p_ffr
-	ret
+1:	sve_flush_p
+	tbz	x0, #0, 2f
+	sve_flush_ffr
+2:	ret
 SYM_FUNC_END(sve_flush_live)
 
 #endif /* CONFIG_ARM64_SVE */
This diff is collapsed.
@@ -725,10 +725,10 @@ static void sve_init_header_from_task(struct user_sve_header *header,
 	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
 		header->flags |= SVE_PT_VL_INHERIT;
 
-	header->vl = target->thread.sve_vl;
+	header->vl = task_get_sve_vl(target);
 	vq = sve_vq_from_vl(header->vl);
 
-	header->max_vl = sve_max_vl;
+	header->max_vl = sve_max_vl();
 	header->size = SVE_PT_SIZE(vq, header->flags);
 	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
 				       SVE_PT_REGS_SVE);
@@ -820,7 +820,7 @@ static int sve_set(struct task_struct *target,
 		goto out;
 
 	/* Actual VL set may be less than the user asked for: */
-	vq = sve_vq_from_vl(target->thread.sve_vl);
+	vq = sve_vq_from_vl(task_get_sve_vl(target));
 
 	/* Registers: FPSIMD-only case */
......
@@ -227,7 +227,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
 {
 	int err = 0;
 	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
-	unsigned int vl = current->thread.sve_vl;
+	unsigned int vl = task_get_sve_vl(current);
 	unsigned int vq = 0;
 
 	if (test_thread_flag(TIF_SVE))
@@ -266,7 +266,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
 		return -EFAULT;
 
-	if (sve.vl != current->thread.sve_vl)
+	if (sve.vl != task_get_sve_vl(current))
 		return -EINVAL;
 
 	if (sve.head.size <= sizeof(*user->sve)) {
@@ -594,10 +594,10 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 	unsigned int vq = 0;
 
 	if (add_all || test_thread_flag(TIF_SVE)) {
-		int vl = sve_max_vl;
+		int vl = sve_max_vl();
 
 		if (!add_all)
-			vl = current->thread.sve_vl;
+			vl = task_get_sve_vl(current);
 
 		vq = sve_vq_from_vl(vl);
 	}
......
@@ -21,11 +21,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
 SYM_FUNC_END(__fpsimd_restore_state)
 
 SYM_FUNC_START(__sve_restore_state)
-	__sve_load 0, x1, 2
+	mov	x2, #1
+	sve_load 0, x1, x2, 3
 	ret
 SYM_FUNC_END(__sve_restore_state)
 
 SYM_FUNC_START(__sve_save_state)
-	sve_save 0, x1, 2
+	mov	x2, #1
+	sve_save 0, x1, x2, 3
 	ret
 SYM_FUNC_END(__sve_save_state)
@@ -46,7 +46,7 @@ unsigned int kvm_sve_max_vl;
 int kvm_arm_init_sve(void)
 {
 	if (system_supports_sve()) {
-		kvm_sve_max_vl = sve_max_virtualisable_vl;
+		kvm_sve_max_vl = sve_max_virtualisable_vl();
 
 		/*
 		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
@@ -61,7 +61,7 @@ int kvm_arm_init_sve(void)
 		 * Don't even try to make use of vector lengths that
 		 * aren't available on all CPUs, for now:
 		 */
-		if (kvm_sve_max_vl < sve_max_vl)
+		if (kvm_sve_max_vl < sve_max_vl())
 			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
 				kvm_sve_max_vl);
 	}
@@ -102,7 +102,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	 * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
 	 * set_sve_vls(). Double-check here just to be sure:
 	 */
-	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
+	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
 		    vl > SVE_VL_ARCH_MAX))
 		return -EIO;
......