Commit 9ec4ecef authored by AKASHI Takahiro, committed by Linus Torvalds

kexec_file,x86,powerpc: factor out kexec_file_ops functions

Since arch_kexec_kernel_image_{probe,load}(),
arch_kimage_file_post_load_cleanup() and arch_kexec_kernel_verify_sig()
are nearly duplicated across architectures, they can be unified around an
architecture-defined kexec_file_ops array, so factor them out into common
default implementations.

Link: http://lkml.kernel.org/r/20180306102303.9063-3-takahiro.akashi@linaro.org
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Acked-by: Dave Young <dyoung@redhat.com>
Tested-by: Dave Young <dyoung@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b799a09f
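
For orientation before the diff: the pattern every architecture converges on after this patch is to export a NULL-terminated, const kexec_file_loaders[] table and, only where extra checks are needed, to override the weak arch_kexec_kernel_image_probe() and fall back to kexec_image_probe_default(). A minimal sketch follows; the loader name my_image_ops is hypothetical, everything else uses the interfaces introduced in the diff below.

/*
 * Sketch only (not part of this commit): arch-side boilerplate after the
 * factoring.  "my_image_ops" is a made-up loader; the table and helpers
 * are the ones added/used in the diff below.
 */
#include <linux/kexec.h>

extern const struct kexec_file_ops my_image_ops;	/* hypothetical loader */

/* Walked by kexec_image_probe_default(); must be NULL-terminated. */
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&my_image_ops,
	NULL
};

/* Optional: only needed when the arch wants checks beyond the default walk. */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	/* arch-specific filtering could go here (cf. powerpc's crash check) */
	return kexec_image_probe_default(image, buf, buf_len);
}
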
@@ -95,7 +95,7 @@ static inline bool kdump_in_progress(void)
}
#ifdef CONFIG_KEXEC_FILE
extern struct kexec_file_ops kexec_elf64_ops;
extern const struct kexec_file_ops kexec_elf64_ops;
#ifdef CONFIG_IMA_KEXEC
#define ARCH_HAS_KIMAGE_ARCH
@@ -657,7 +657,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
return ret ? ERR_PTR(ret) : fdt;
}
struct kexec_file_ops kexec_elf64_ops = {
const struct kexec_file_ops kexec_elf64_ops = {
.probe = elf64_probe,
.load = elf64_load,
};
@@ -31,52 +31,19 @@
#define SLAVE_CODE_SIZE 256
static struct kexec_file_ops *kexec_file_loaders[] = {
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_elf64_ops,
NULL
};
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
int i, ret = -ENOEXEC;
struct kexec_file_ops *fops;
/* We don't support crash kernels yet. */
if (image->type == KEXEC_TYPE_CRASH)
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
fops = kexec_file_loaders[i];
if (!fops || !fops->probe)
continue;
ret = fops->probe(buf, buf_len);
if (!ret) {
image->fops = fops;
return ret;
}
}
return ret;
}
void *arch_kexec_kernel_image_load(struct kimage *image)
{
if (!image->fops || !image->fops->load)
return ERR_PTR(-ENOEXEC);
return image->fops->load(image, image->kernel_buf,
image->kernel_buf_len, image->initrd_buf,
image->initrd_buf_len, image->cmdline_buf,
image->cmdline_buf_len);
}
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
if (!image->fops || !image->fops->cleanup)
return 0;
return image->fops->cleanup(image->image_loader_data);
return kexec_image_probe_default(image, buf, buf_len);
}
/**
@@ -2,6 +2,6 @@
#ifndef _ASM_KEXEC_BZIMAGE64_H
#define _ASM_KEXEC_BZIMAGE64_H
extern struct kexec_file_ops kexec_bzImage64_ops;
extern const struct kexec_file_ops kexec_bzImage64_ops;
#endif /* _ASM_KEXE_BZIMAGE64_H */
@@ -538,7 +538,7 @@ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
}
#endif
struct kexec_file_ops kexec_bzImage64_ops = {
const struct kexec_file_ops kexec_bzImage64_ops = {
.probe = bzImage64_probe,
.load = bzImage64_load,
.cleanup = bzImage64_cleanup,
@@ -30,8 +30,9 @@
#include <asm/set_memory.h>
#ifdef CONFIG_KEXEC_FILE
static struct kexec_file_ops *kexec_file_loaders[] = {
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_bzImage64_ops,
NULL
};
#endif
@@ -364,27 +365,6 @@ void arch_crash_save_vmcoreinfo(void)
/* arch-dependent functionality related to kexec file-based syscall */
#ifdef CONFIG_KEXEC_FILE
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
int i, ret = -ENOEXEC;
struct kexec_file_ops *fops;
for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
fops = kexec_file_loaders[i];
if (!fops || !fops->probe)
continue;
ret = fops->probe(buf, buf_len);
if (!ret) {
image->fops = fops;
return ret;
}
}
return ret;
}
void *arch_kexec_kernel_image_load(struct kimage *image)
{
vfree(image->arch.elf_headers);
@@ -399,27 +379,6 @@ void *arch_kexec_kernel_image_load(struct kimage *image)
image->cmdline_buf_len);
}
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
if (!image->fops || !image->fops->cleanup)
return 0;
return image->fops->cleanup(image->image_loader_data);
}
#ifdef CONFIG_KEXEC_VERIFY_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
unsigned long kernel_len)
{
if (!image->fops || !image->fops->verify_sig) {
pr_debug("kernel loader does not support signature verification.");
return -EKEYREJECTED;
}
return image->fops->verify_sig(kernel, kernel_len);
}
#endif
/*
* Apply purgatory relocations.
*
@@ -135,6 +135,11 @@ struct kexec_file_ops {
#endif
};
extern const struct kexec_file_ops * const kexec_file_loaders[];
int kexec_image_probe_default(struct kimage *image, void *buf,
unsigned long buf_len);
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
* @image: kexec image in which memory to search.
@@ -209,7 +214,7 @@ struct kimage {
unsigned long cmdline_buf_len;
/* File operations provided by image loader */
struct kexec_file_ops *fops;
const struct kexec_file_ops *fops;
/* Image loader handling the kernel can store a pointer here */
void *image_loader_data;
@@ -273,12 +278,6 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
unsigned long buf_len);
int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
Elf_Shdr *sechdrs, unsigned int relsec);
int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
@@ -28,28 +28,80 @@
static int kexec_calculate_store_digests(struct kimage *image);
/*
* Currently this is the only default function that is exported as some
* architectures need it to do additional handlings.
* In the future, other default functions may be exported too if required.
*/
int kexec_image_probe_default(struct kimage *image, void *buf,
unsigned long buf_len)
{
const struct kexec_file_ops * const *fops;
int ret = -ENOEXEC;
for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
ret = (*fops)->probe(buf, buf_len);
if (!ret) {
image->fops = *fops;
return ret;
}
}
return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
return -ENOEXEC;
return kexec_image_probe_default(image, buf, buf_len);
}
static void *kexec_image_load_default(struct kimage *image)
{
if (!image->fops || !image->fops->load)
return ERR_PTR(-ENOEXEC);
return image->fops->load(image, image->kernel_buf,
image->kernel_buf_len, image->initrd_buf,
image->initrd_buf_len, image->cmdline_buf,
image->cmdline_buf_len);
}
void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
return ERR_PTR(-ENOEXEC);
return kexec_image_load_default(image);
}
static int kexec_image_post_load_cleanup_default(struct kimage *image)
{
if (!image->fops || !image->fops->cleanup)
return 0;
return image->fops->cleanup(image->image_loader_data);
}
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
return -EINVAL;
return kexec_image_post_load_cleanup_default(image);
}
#ifdef CONFIG_KEXEC_VERIFY_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
unsigned long buf_len)
{
if (!image->fops || !image->fops->verify_sig) {
pr_debug("kernel loader does not support signature verification.\n");
return -EKEYREJECTED;
}
return image->fops->verify_sig(buf, buf_len);
}
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
unsigned long buf_len)
{
return -EKEYREJECTED;
return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
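
Taken together, the kexec_file.c hunk above pairs each __weak arch_* hook with a kexec_image_*_default() helper; an architecture either lives with the default or overrides the weak symbol and, as powerpc does for probe, still calls the exported default for the common part. Below is a call-flow sketch of the result; the caller names kimage_file_prepare_segments() and kimage_file_post_load_cleanup() are taken from kernel/kexec_file.c and are not visible in the hunks above.

/*
 * Call-flow sketch after this patch (assumed caller names from
 * kernel/kexec_file.c; those call sites are not part of this diff):
 *
 *   kimage_file_prepare_segments()
 *     -> arch_kexec_kernel_image_probe()           __weak, arch may override
 *          -> kexec_image_probe_default()          walks kexec_file_loaders[]
 *     -> arch_kexec_kernel_verify_sig()            only if CONFIG_KEXEC_VERIFY_SIG
 *          -> kexec_image_verify_sig_default()     -> image->fops->verify_sig()
 *     -> arch_kexec_kernel_image_load()
 *          -> kexec_image_load_default()           -> image->fops->load()
 *
 *   kimage_file_post_load_cleanup()
 *     -> arch_kimage_file_post_load_cleanup()
 *          -> kexec_image_post_load_cleanup_default() -> image->fops->cleanup()
 */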