Commit ce0a1b60 authored by Arnd Bergmann, committed by Borislav Petkov (AMD)

x86/paravirt: Silence unused native_pv_lock_init() function warning

The native_pv_lock_init() function is only used in SMP configurations
and declared in asm/qspinlock.h, which is not included in UP kernels,
but the function is still defined for both, causing a warning:

  arch/x86/kernel/paravirt.c:76:13: error: no previous prototype for 'native_pv_lock_init' [-Werror=missing-prototypes]

Move the declaration to asm/paravirt.h so it is visible even without
CONFIG_SMP, and short-circuit the definition to turn it into an empty
inline function when CONFIG_PARAVIRT is disabled.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230803082619.1369127-7-arnd@kernel.org
parent 1a3e4b4d
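
For context, -Wmissing-prototypes fires whenever a global function
definition is compiled without a previous declaration in scope; the fix
is to make the declaring header visible to every configuration that
compiles the definition. Below is a minimal sketch of the pattern this
patch applies, using hypothetical names (feature.h, feature.c and
CONFIG_FEATURE stand in for the real files and Kconfig symbols):

  /* feature.h -- stand-in for asm/paravirt.h: included everywhere */
  #ifndef FEATURE_H
  #define FEATURE_H

  #ifdef CONFIG_FEATURE
  /*
   * The prototype is visible in every build, so the out-of-line
   * definition in feature.c never triggers -Wmissing-prototypes.
   */
  void feature_init(void);
  #else
  /*
   * Builds without the feature get an empty inline stub, so callers
   * do not need #ifdef guards of their own.
   */
  static inline void feature_init(void)
  {
  }
  #endif

  #endif /* FEATURE_H */

  /* feature.c -- stand-in for arch/x86/kernel/paravirt.c; only
   * compiled when CONFIG_FEATURE is enabled */
  #include "feature.h"	/* prototype precedes the definition */

  void feature_init(void)
  {
  	/* real initialization work would go here */
  }

The pre-patch layout broke this rule: the prototype lived in a header
that UP kernels never include, while the definition was built anyway.
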
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -739,6 +739,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
 	      ".popsection")
 
 extern void default_banner(void);
+void native_pv_lock_init(void) __init;
 
 #else  /* __ASSEMBLY__ */
 
@@ -778,6 +779,12 @@ extern void default_banner(void);
 #endif	/* __ASSEMBLY__ */
 #else  /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+
+#ifndef __ASSEMBLY__
+static inline void native_pv_lock_init(void)
+{
+}
+#endif
 #endif /* !CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLY__
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -74,8 +74,6 @@ static inline bool vcpu_is_preempted(long cpu)
  */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
-void native_pv_lock_init(void) __init;
-
 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows
  * virt to hijack it.
@@ -103,10 +101,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
 	return true;
 }
 
-#else
-static inline void native_pv_lock_init(void)
-{
-}
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,7 +75,8 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void __init native_pv_lock_init(void)
 {
-	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		static_branch_disable(&virt_spin_lock_key);
 }
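
The paravirt.c hunk leans on IS_ENABLED() evaluating to a compile-time
constant: with CONFIG_PARAVIRT_SPINLOCKS disabled, the condition folds
to false and, with optimization enabled, the compiler discards the
static_branch_disable() call along with its reference to
virt_spin_lock_key. A self-contained sketch of the same idiom follows,
with a simplified IS_ENABLED() and stand-in functions (the kernel's
real macro also copes with CONFIG_* symbols that are left undefined):

  #include <stdio.h>

  /* Simplified stand-in for the kernel's IS_ENABLED(); the real macro
   * also handles options that are not defined at all. */
  #define IS_ENABLED(option) (option)

  #define CONFIG_PARAVIRT_SPINLOCKS 1	/* set to 0 to compile the call away */

  static void disable_virt_spinlock_path(void)
  {
  	/* stands in for static_branch_disable(&virt_spin_lock_key) */
  	printf("virt spinlock fast path disabled\n");
  }

  int main(void)
  {
  	int on_hypervisor = 0;	/* stands in for boot_cpu_has(X86_FEATURE_HYPERVISOR) */

  	/*
  	 * When the option is 0, the condition is a constant zero and
  	 * the call below becomes dead code that an optimizing compiler
  	 * eliminates, reference and all.
  	 */
  	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && !on_hypervisor)
  		disable_virt_spinlock_path();

  	return 0;
  }

This is why the guard can stay inside the function body rather than
behind an #ifdef: the source always parses, and configurations without
paravirt spinlocks pay no runtime or image-size cost.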