Commit 57ad19bc authored by Alexander Gordeev

s390/boot: cleanup adjust_to_uv_max() function

Uncouple input and output arguments by making the latter
the function return value.
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 6f5c672d
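
As a minimal standalone sketch of the pattern this commit applies (the names clamp_to_limit and storage_max are made up for illustration and are not the kernel code): the old style reads and writes the caller's variable through a pointer that acts as both input and output, while the new style takes the value as a plain argument and hands the possibly clamped result back as the return value.

/*
 * Illustrative sketch only -- hypothetical names, not the kernel code.
 * Old style: the caller's variable is both read and written through a
 * pointer.  New style: input and output are uncoupled; the clamped
 * value is returned.
 */
#include <stdio.h>

static unsigned long storage_max = 0x10000000UL;        /* made-up limit */

/* Old style: in/out pointer parameter. */
static void clamp_to_limit_old(unsigned long *vmax)
{
        if (storage_max && *vmax > storage_max)
                *vmax = storage_max;
}

/* New style: value in, value out. */
static unsigned long clamp_to_limit(unsigned long limit)
{
        if (storage_max && limit > storage_max)
                limit = storage_max;
        return limit;
}

int main(void)
{
        unsigned long vmax = 0x20000000UL;

        clamp_to_limit_old(&vmax);              /* caller must pass &vmax */
        printf("old style: %#lx\n", vmax);

        vmax = clamp_to_limit(0x20000000UL);    /* data flow is explicit */
        printf("new style: %#lx\n", vmax);
        return 0;
}

Uncoupling the two makes the data flow at the call site explicit, which is exactly what the diff below does with vmax = adjust_to_uv_max(vmax).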
@@ -152,6 +152,7 @@ static void setup_kernel_memory_layout(void)
 	unsigned long vmemmap_start;
 	unsigned long rte_size;
 	unsigned long pages;
+	unsigned long vmax;
 
 	pages = ident_map_size / PAGE_SIZE;
 	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
@@ -163,10 +164,10 @@ static void setup_kernel_memory_layout(void)
 	    vmalloc_size > _REGION2_SIZE ||
 	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
 		    _REGION2_SIZE) {
-		MODULES_END = _REGION1_SIZE;
+		vmax = _REGION1_SIZE;
 		rte_size = _REGION2_SIZE;
 	} else {
-		MODULES_END = _REGION2_SIZE;
+		vmax = _REGION2_SIZE;
 		rte_size = _REGION3_SIZE;
 	}
 	/*
@@ -174,11 +175,12 @@ static void setup_kernel_memory_layout(void)
 	 * secure storage limit, so that any vmalloc allocation
 	 * we do could be used to back secure guest storage.
 	 */
-	adjust_to_uv_max(&MODULES_END);
+	vmax = adjust_to_uv_max(vmax);
 #ifdef CONFIG_KASAN
 	/* force vmalloc and modules below kasan shadow */
-	MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
+	MODULES_END = vmax;
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
...
@@ -51,10 +51,11 @@ void uv_query_info(void)
 }
 
 #if IS_ENABLED(CONFIG_KVM)
-void adjust_to_uv_max(unsigned long *vmax)
+unsigned long adjust_to_uv_max(unsigned long limit)
 {
 	if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
-		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+		limit = min_t(unsigned long, limit, uv_info.max_sec_stor_addr);
+	return limit;
 }
 
 static int is_prot_virt_host_capable(void)
...
@@ -3,10 +3,13 @@
 #define BOOT_UV_H
 
 #if IS_ENABLED(CONFIG_KVM)
-void adjust_to_uv_max(unsigned long *vmax);
+unsigned long adjust_to_uv_max(unsigned long limit);
 void sanitize_prot_virt_host(void);
 #else
-static inline void adjust_to_uv_max(unsigned long *vmax) {}
+static inline unsigned long adjust_to_uv_max(unsigned long limit)
+{
+	return limit;
+}
 static inline void sanitize_prot_virt_host(void) {}
 #endif
 
...