// SPDX-License-Identifier: GPL-2.0
/*
 * Kexec image loader
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#define pr_fmt(fmt)	"kexec_file(Image): " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/pe.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <asm/cpufeature.h>
#include <asm/image.h>
#include <asm/memory.h>

static int image_probe(const char *kernel_buf, unsigned long kernel_len)
{
	const struct arm64_image_header *h =
		(const struct arm64_image_header *)(kernel_buf);

	if (!h || (kernel_len < sizeof(*h)))
		return -EINVAL;

	if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
		return -EINVAL;

	return 0;
}

static void *image_load(struct kimage *image,
			char *kernel, unsigned long kernel_len,
			char *initrd, unsigned long initrd_len,
			char *cmdline, unsigned long cmdline_len)
{
	struct arm64_image_header *h;
	u64 flags, value;
	bool be_image, be_kernel;
	struct kexec_buf kbuf;
	unsigned long text_offset, kernel_segment_number;
	struct kexec_segment *kernel_segment;
	int ret;

	/*
	 * We require a kernel with an unambiguous Image header. Per
	 * Documentation/arch/arm64/booting.rst, this is the case when
	 * image_size is non-zero (practically speaking, since v3.17).
	 */
	h = (struct arm64_image_header *)kernel;
	if (!h->image_size)
		return ERR_PTR(-EINVAL);

	/* Check cpu features */
	flags = le64_to_cpu(h->flags);
	be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
	be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	if ((be_image != be_kernel) && !system_supports_mixed_endian())
		return ERR_PTR(-EINVAL);

	value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
	if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
			!system_supports_4kb_granule()) ||
	    ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
			!system_supports_64kb_granule()) ||
	    ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
			!system_supports_16kb_granule()))
		return ERR_PTR(-EINVAL);

	/* Load the kernel */
	kbuf.image = image;
	kbuf.buf_min = 0;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = false;

	kbuf.buffer = kernel;
	kbuf.bufsz = kernel_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = le64_to_cpu(h->image_size);
	text_offset = le64_to_cpu(h->text_offset);
	kbuf.buf_align = MIN_KIMG_ALIGN;

	/* Adjust kernel segment with TEXT_OFFSET */
	kbuf.memsz += text_offset;

	kernel_segment_number = image->nr_segments;

	/*
	 * The location of the kernel segment may make it impossible to satisfy
	 * the other segment requirements, so we try repeatedly to find a
	 * location that will work.
	 */
	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
		/* Try to load additional data */
		kernel_segment = &image->segment[kernel_segment_number];
		ret = load_other_segments(image, kernel_segment->mem,
					  kernel_segment->memsz, initrd,
					  initrd_len, cmdline);
		if (!ret)
			break;

		/*
		 * We couldn't find space for the other segments; erase the
		 * kernel segment and try the next available hole.
		 */
		image->nr_segments -= 1;
		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	}

	if (ret) {
		pr_err("Could not find any suitable kernel location!");
		return ERR_PTR(ret);
	}

	kernel_segment = &image->segment[kernel_segment_number];
	kernel_segment->mem += text_offset;
	kernel_segment->memsz -= text_offset;
	image->start = kernel_segment->mem;

	pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz);

	return NULL;
}

const struct kexec_file_ops kexec_image_ops = {
	.probe = image_probe,
	.load = image_load,
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
	.verify_sig = kexec_kernel_verify_pe_sig,
#endif
};