Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

 - various hotfixes

 - kexec_file updates and feature work

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (27 commits)
  kernel/kexec_file.c: move purgatories sha256 to common code
  kernel/kexec_file.c: allow archs to set purgatory load address
  kernel/kexec_file.c: remove mis-use of sh_offset field during purgatory load
  kernel/kexec_file.c: remove unneeded variables in kexec_purgatory_setup_sechdrs
  kernel/kexec_file.c: remove unneeded for-loop in kexec_purgatory_setup_sechdrs
  kernel/kexec_file.c: split up __kexec_load_puragory
  kernel/kexec_file.c: use read-only sections in arch_kexec_apply_relocations*
  kernel/kexec_file.c: search symbols in read-only kexec_purgatory
  kernel/kexec_file.c: make purgatory_info->ehdr const
  kernel/kexec_file.c: remove checks in kexec_purgatory_load
  include/linux/kexec.h: silence compile warnings
  kexec_file, x86: move re-factored code to generic side
  x86: kexec_file: clean up prepare_elf64_headers()
  x86: kexec_file: lift CRASH_MAX_RANGES limit on crash_mem buffer
  x86: kexec_file: remove X86_64 dependency from prepare_elf64_headers()
  x86: kexec_file: purge system-ram walking from prepare_elf64_headers()
  kexec_file,x86,powerpc: factor out kexec_file_ops functions
  kexec_file: make use of purgatory optional
  proc: revalidate misc dentries
  mm, slab: reschedule cache_reap() on the same CPU
  ...
commit 18b7fd1c93
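The central API change in this series is visible in both the powerpc and x86 loaders below: kexec_load_purgatory() now takes a caller-prepared struct kexec_buf instead of min/max/top_down arguments plus an out-pointer, and the chosen load address comes back in kbuf.mem. A minimal sketch of the new calling convention follows; the function name and the 0..ULONG_MAX window are illustrative assumptions, not code from this commit:

	/*
	 * Illustrative sketch only: shows the kexec_buf-based calling
	 * convention introduced by this series. example_load_purgatory()
	 * and its placement window are assumptions, not kernel code.
	 */
	static int example_load_purgatory(struct kimage *image)
	{
		struct kexec_buf pbuf = {
			.image    = image,
			.buf_min  = 0,
			.buf_max  = ULONG_MAX,
			.top_down = true,
		};
		int ret;

		ret = kexec_load_purgatory(image, &pbuf);
		if (ret)
			return ret;

		/* The chosen load address is returned in pbuf.mem. */
		pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
		return 0;
	}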
@@ -178,6 +178,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)

@@ -552,6 +552,9 @@ config KEXEC_FILE
	  for kernel and initramfs as opposed to a list of segments as is the
	  case for the older kexec call.
 
+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool KEXEC_FILE
+
 config RELOCATABLE
	bool "Build a relocatable kernel"
	depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))

@@ -95,7 +95,7 @@ static inline bool kdump_in_progress(void)
 }
 
 #ifdef CONFIG_KEXEC_FILE
-extern struct kexec_file_ops kexec_elf64_ops;
+extern const struct kexec_file_ops kexec_elf64_ops;
 
 #ifdef CONFIG_IMA_KEXEC
 #define ARCH_HAS_KIMAGE_ARCH

@@ -572,7 +572,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 {
	int ret;
	unsigned int fdt_size;
-	unsigned long kernel_load_addr, purgatory_load_addr;
+	unsigned long kernel_load_addr;
	unsigned long initrd_load_addr = 0, fdt_load_addr;
	void *fdt;
	const void *slave_code;
@@ -580,6 +580,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	struct elf_info elf_info;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ppc64_rma_size };
+	struct kexec_buf pbuf = { .image = image, .buf_min = 0,
+				  .buf_max = ppc64_rma_size, .top_down = true };
 
	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
	if (ret)
@@ -591,14 +593,13 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 
	pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
 
-	ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true,
-				   &purgatory_load_addr);
+	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed.\n");
		goto out;
	}
 
-	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
 
	if (initrd != NULL) {
		kbuf.buffer = initrd;
@@ -657,7 +658,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	return ret ? ERR_PTR(ret) : fdt;
 }
 
-struct kexec_file_ops kexec_elf64_ops = {
+const struct kexec_file_ops kexec_elf64_ops = {
	.probe = elf64_probe,
	.load = elf64_load,
 };

@@ -31,52 +31,19 @@
 
 #define SLAVE_CODE_SIZE	256
 
-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
 };
 
 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
 {
-	int i, ret = -ENOEXEC;
-	struct kexec_file_ops *fops;
-
	/* We don't support crash kernels yet. */
	if (image->type == KEXEC_TYPE_CRASH)
		return -EOPNOTSUPP;
 
-	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
-		fops = kexec_file_loaders[i];
-		if (!fops || !fops->probe)
-			continue;
-
-		ret = fops->probe(buf, buf_len);
-		if (!ret) {
-			image->fops = fops;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-void *arch_kexec_kernel_image_load(struct kimage *image)
-{
-	if (!image->fops || !image->fops->load)
-		return ERR_PTR(-ENOEXEC);
-
-	return image->fops->load(image, image->kernel_buf,
-				 image->kernel_buf_len, image->initrd_buf,
-				 image->initrd_buf_len, image->cmdline_buf,
-				 image->cmdline_buf_len);
-}
-
-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	if (!image->fops || !image->fops->cleanup)
-		return 0;
-
-	return image->fops->cleanup(image->image_loader_data);
+	return kexec_image_probe_default(image, buf, buf_len);
 }
 
 /**

@@ -220,6 +220,8 @@ static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)

@@ -160,6 +160,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)

@@ -193,6 +193,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
	return 1;
 }
 
+/*
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)
 {

@@ -2008,6 +2008,9 @@ config KEXEC_FILE
	  for kernel and initramfs as opposed to list of segments as
	  accepted by previous system call.
 
+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool KEXEC_FILE
+
 config KEXEC_VERIFY_SIG
	bool "Verify kernel signature during kexec_file_load() syscall"
	depends on KEXEC_FILE

@@ -2,6 +2,6 @@
 #ifndef _ASM_KEXEC_BZIMAGE64_H
 #define _ASM_KEXEC_BZIMAGE64_H
 
-extern struct kexec_file_ops kexec_bzImage64_ops;
+extern const struct kexec_file_ops kexec_bzImage64_ops;
 
 #endif /* _ASM_KEXE_BZIMAGE64_H */

@@ -38,37 +38,6 @@
 #include <asm/virtext.h>
 #include <asm/intel_pt.h>
 
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN	4096
-
-/* This primarily represents number of split ranges due to exclusion */
-#define CRASH_MAX_RANGES	16
-
-struct crash_mem_range {
-	u64 start, end;
-};
-
-struct crash_mem {
-	unsigned int nr_ranges;
-	struct crash_mem_range ranges[CRASH_MAX_RANGES];
-};
-
-/* Misc data about ram ranges needed to prepare elf headers */
-struct crash_elf_data {
-	struct kimage *image;
-	/*
-	 * Total number of ram ranges we have after various adjustments for
-	 * crash reserved region, etc.
-	 */
-	unsigned int max_nr_ranges;
-
-	/* Pointer to elf header */
-	void *ehdr;
-	/* Pointer to next phdr */
-	void *bufp;
-	struct crash_mem mem;
-};
-
 /* Used while preparing memory map entries for second kernel */
 struct crash_memmap_data {
	struct boot_params *params;
@@ -218,124 +187,49 @@ static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
	return 0;
 }
 
 
 /* Gather all the required information to prepare elf headers for ram regions */
-static void fill_up_crash_elf_data(struct crash_elf_data *ced,
-				   struct kimage *image)
+static struct crash_mem *fill_up_crash_elf_data(void)
 {
	unsigned int nr_ranges = 0;
-
-	ced->image = image;
+	struct crash_mem *cmem;
 
	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
+	if (!nr_ranges)
+		return NULL;
 
-	ced->max_nr_ranges = nr_ranges;
+	/*
+	 * Exclusion of crash region and/or crashk_low_res may cause
+	 * another range split. So add extra two slots here.
+	 */
+	nr_ranges += 2;
+	cmem = vzalloc(sizeof(struct crash_mem) +
+		       sizeof(struct crash_mem_range) * nr_ranges);
+	if (!cmem)
+		return NULL;
 
-	/* Exclusion of crash region could split memory ranges */
-	ced->max_nr_ranges++;
+	cmem->max_nr_ranges = nr_ranges;
+	cmem->nr_ranges = 0;
 
-	/* If crashk_low_res is not 0, another range split possible */
-	if (crashk_low_res.end)
-		ced->max_nr_ranges++;
-}
-
-static int exclude_mem_range(struct crash_mem *mem,
-		unsigned long long mstart, unsigned long long mend)
-{
-	int i, j;
-	unsigned long long start, end;
-	struct crash_mem_range temp_range = {0, 0};
-
-	for (i = 0; i < mem->nr_ranges; i++) {
-		start = mem->ranges[i].start;
-		end = mem->ranges[i].end;
-
-		if (mstart > end || mend < start)
-			continue;
-
-		/* Truncate any area outside of range */
-		if (mstart < start)
-			mstart = start;
-		if (mend > end)
-			mend = end;
-
-		/* Found completely overlapping range */
-		if (mstart == start && mend == end) {
-			mem->ranges[i].start = 0;
-			mem->ranges[i].end = 0;
-			if (i < mem->nr_ranges - 1) {
-				/* Shift rest of the ranges to left */
-				for (j = i; j < mem->nr_ranges - 1; j++) {
-					mem->ranges[j].start =
-						mem->ranges[j+1].start;
-					mem->ranges[j].end =
-							mem->ranges[j+1].end;
-				}
-			}
-			mem->nr_ranges--;
-			return 0;
-		}
-
-		if (mstart > start && mend < end) {
-			/* Split original range */
-			mem->ranges[i].end = mstart - 1;
-			temp_range.start = mend + 1;
-			temp_range.end = end;
-		} else if (mstart != start)
-			mem->ranges[i].end = mstart - 1;
-		else
-			mem->ranges[i].start = mend + 1;
-		break;
-	}
-
-	/* If a split happend, add the split to array */
-	if (!temp_range.end)
-		return 0;
-
-	/* Split happened */
-	if (i == CRASH_MAX_RANGES - 1) {
-		pr_err("Too many crash ranges after split\n");
-		return -ENOMEM;
-	}
-
-	/* Location where new range should go */
-	j = i + 1;
-	if (j < mem->nr_ranges) {
-		/* Move over all ranges one slot towards the end */
-		for (i = mem->nr_ranges - 1; i >= j; i--)
-			mem->ranges[i + 1] = mem->ranges[i];
-	}
-
-	mem->ranges[j].start = temp_range.start;
-	mem->ranges[j].end = temp_range.end;
-	mem->nr_ranges++;
-	return 0;
+	return cmem;
 }
 
 /*
  * Look for any unwanted ranges between mstart, mend and remove them. This
- * might lead to split and split ranges are put in ced->mem.ranges[] array
+ * might lead to split and split ranges are put in cmem->ranges[] array
  */
-static int elf_header_exclude_ranges(struct crash_elf_data *ced,
-		unsigned long long mstart, unsigned long long mend)
+static int elf_header_exclude_ranges(struct crash_mem *cmem)
 {
-	struct crash_mem *cmem = &ced->mem;
	int ret = 0;
 
-	memset(cmem->ranges, 0, sizeof(cmem->ranges));
-
-	cmem->ranges[0].start = mstart;
-	cmem->ranges[0].end = mend;
-	cmem->nr_ranges = 1;
-
	/* Exclude crashkernel region */
-	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;
 
	if (crashk_low_res.end) {
-		ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
+					      crashk_low_res.end);
		if (ret)
			return ret;
	}
@@ -345,144 +239,12 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
 
 static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
 {
-	struct crash_elf_data *ced = arg;
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long mstart, mend;
-	struct kimage *image = ced->image;
-	struct crash_mem *cmem;
-	int ret, i;
+	struct crash_mem *cmem = arg;
 
-	ehdr = ced->ehdr;
+	cmem->ranges[cmem->nr_ranges].start = res->start;
+	cmem->ranges[cmem->nr_ranges].end = res->end;
+	cmem->nr_ranges++;
 
-	/* Exclude unwanted mem ranges */
-	ret = elf_header_exclude_ranges(ced, res->start, res->end);
-	if (ret)
-		return ret;
-
-	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
-	cmem = &ced->mem;
-
-	for (i = 0; i < cmem->nr_ranges; i++) {
-		mstart = cmem->ranges[i].start;
-		mend = cmem->ranges[i].end;
-
-		phdr = ced->bufp;
-		ced->bufp += sizeof(Elf64_Phdr);
-
-		phdr->p_type = PT_LOAD;
-		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_offset = mstart;
-
-		/*
-		 * If a range matches backup region, adjust offset to backup
-		 * segment.
-		 */
-		if (mstart == image->arch.backup_src_start &&
-		    (mend - mstart + 1) == image->arch.backup_src_sz)
-			phdr->p_offset = image->arch.backup_load_addr;
-
-		phdr->p_paddr = mstart;
-		phdr->p_vaddr = (unsigned long long) __va(mstart);
-		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
-		phdr->p_align = 0;
-		ehdr->e_phnum++;
-		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
-			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
-			ehdr->e_phnum, phdr->p_offset);
-	}
-
-	return ret;
-}
-
-static int prepare_elf64_headers(struct crash_elf_data *ced,
-		void **addr, unsigned long *sz)
-{
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
-	unsigned char *buf, *bufp;
-	unsigned int cpu;
-	unsigned long long notes_addr;
-	int ret;
-
-	/* extra phdr for vmcoreinfo elf note */
-	nr_phdr = nr_cpus + 1;
-	nr_phdr += ced->max_nr_ranges;
-
-	/*
-	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
-	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
-	 * I think this is required by tools like gdb. So same physical
-	 * memory will be mapped in two elf headers. One will contain kernel
-	 * text virtual addresses and other will have __va(physical) addresses.
-	 */
-
-	nr_phdr++;
-	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
-	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
-
-	buf = vzalloc(elf_sz);
-	if (!buf)
-		return -ENOMEM;
-
-	bufp = buf;
-	ehdr = (Elf64_Ehdr *)bufp;
-	bufp += sizeof(Elf64_Ehdr);
-	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
-	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
-	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
-	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
-	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
-	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
-	ehdr->e_type = ET_CORE;
-	ehdr->e_machine = ELF_ARCH;
-	ehdr->e_version = EV_CURRENT;
-	ehdr->e_phoff = sizeof(Elf64_Ehdr);
-	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
-	ehdr->e_phentsize = sizeof(Elf64_Phdr);
-
-	/* Prepare one phdr of type PT_NOTE for each present cpu */
-	for_each_present_cpu(cpu) {
-		phdr = (Elf64_Phdr *)bufp;
-		bufp += sizeof(Elf64_Phdr);
-		phdr->p_type = PT_NOTE;
-		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
-		phdr->p_offset = phdr->p_paddr = notes_addr;
-		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
-		(ehdr->e_phnum)++;
-	}
-
-	/* Prepare one PT_NOTE header for vmcoreinfo */
-	phdr = (Elf64_Phdr *)bufp;
-	bufp += sizeof(Elf64_Phdr);
-	phdr->p_type = PT_NOTE;
-	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
-	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
-	(ehdr->e_phnum)++;
-
-#ifdef CONFIG_X86_64
-	/* Prepare PT_LOAD type program header for kernel text region */
-	phdr = (Elf64_Phdr *)bufp;
-	bufp += sizeof(Elf64_Phdr);
-	phdr->p_type = PT_LOAD;
-	phdr->p_flags = PF_R|PF_W|PF_X;
-	phdr->p_vaddr = (Elf64_Addr)_text;
-	phdr->p_filesz = phdr->p_memsz = _end - _text;
-	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
-	(ehdr->e_phnum)++;
-#endif
-
-	/* Prepare PT_LOAD headers for system ram chunks. */
-	ced->ehdr = ehdr;
-	ced->bufp = bufp;
-	ret = walk_system_ram_res(0, -1, ced,
-			prepare_elf64_ram_headers_callback);
-	if (ret < 0)
-		return ret;
-
-	*addr = buf;
-	*sz = elf_sz;
	return 0;
 }
 
@@ -490,18 +252,46 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
 static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
 {
-	struct crash_elf_data *ced;
-	int ret;
+	struct crash_mem *cmem;
+	Elf64_Ehdr *ehdr;
+	Elf64_Phdr *phdr;
+	int ret, i;
 
-	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
-	if (!ced)
+	cmem = fill_up_crash_elf_data();
+	if (!cmem)
		return -ENOMEM;
 
-	fill_up_crash_elf_data(ced, image);
+	ret = walk_system_ram_res(0, -1, cmem,
+				  prepare_elf64_ram_headers_callback);
+	if (ret)
+		goto out;
+
+	/* Exclude unwanted mem ranges */
+	ret = elf_header_exclude_ranges(cmem);
+	if (ret)
+		goto out;
 
	/* By default prepare 64bit headers */
-	ret = prepare_elf64_headers(ced, addr, sz);
-	kfree(ced);
+	ret = crash_prepare_elf64_headers(cmem,
+				IS_ENABLED(CONFIG_X86_64), addr, sz);
+	if (ret)
+		goto out;
+
+	/*
+	 * If a range matches backup region, adjust offset to backup
+	 * segment.
+	 */
+	ehdr = (Elf64_Ehdr *)*addr;
+	phdr = (Elf64_Phdr *)(ehdr + 1);
+	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
+		if (phdr->p_type == PT_LOAD &&
+				phdr->p_paddr == image->arch.backup_src_start &&
+				phdr->p_memsz == image->arch.backup_src_sz) {
+			phdr->p_offset = image->arch.backup_load_addr;
+			break;
+		}
+out:
+	vfree(cmem);
	return ret;
 }
 
@@ -547,14 +337,14 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
-	ret = exclude_mem_range(cmem, start, end);
+	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;
 
	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
-	return exclude_mem_range(cmem, start, end);
+	return crash_exclude_mem_range(cmem, start, end);
 }
 
 /* Prepare memory map for crash dump kernel */

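For context on the hunks above: the open-coded exclusion helper moves to generic code as crash_exclude_mem_range(), operating on a struct crash_mem with a flexible ranges[] array instead of the fixed CRASH_MAX_RANGES. A hedged sketch of its use follows; the RAM range values and the example_exclude_crashk() wrapper are made up for illustration, mirroring what fill_up_crash_elf_data() does above:

	/* Sketch only, not from the commit: carve the crashkernel window
	 * out of a single made-up RAM range. Excluding a window from the
	 * middle of a range splits it in two, which is why the array is
	 * sized with spare slots, as the code above does. */
	static int example_exclude_crashk(void)
	{
		struct crash_mem *cmem;
		int ret;

		cmem = vzalloc(sizeof(*cmem) +
			       3 * sizeof(struct crash_mem_range));
		if (!cmem)
			return -ENOMEM;
		cmem->max_nr_ranges = 3;
		cmem->nr_ranges = 1;
		cmem->ranges[0].start = 0;	/* made-up RAM range */
		cmem->ranges[0].end = SZ_4G - 1;

		/* A mid-range split consumes one of the spare slots. */
		ret = crash_exclude_mem_range(cmem, crashk_res.start,
					      crashk_res.end);
		vfree(cmem);
		return ret;
	}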
@@ -334,7 +334,6 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	unsigned long setup_header_size, params_cmdline_sz;
	struct boot_params *params;
	unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
-	unsigned long purgatory_load_addr;
	struct bzimage64_data *ldata;
	struct kexec_entry64_regs regs64;
	void *stack;
@@ -342,6 +341,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
	struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX,
				  .top_down = true };
+	struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR,
+				  .buf_max = ULONG_MAX, .top_down = true };
 
	header = (struct setup_header *)(kernel + setup_hdr_offset);
	setup_sects = header->setup_sects;
@@ -379,14 +380,13 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	 * Load purgatory. For 64bit entry point, purgatory code can be
	 * anywhere.
	 */
-	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
-				   &purgatory_load_addr);
+	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed\n");
		return ERR_PTR(ret);
	}
 
-	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
 
 
	/*
@@ -538,7 +538,7 @@ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
 }
 #endif
 
-struct kexec_file_ops kexec_bzImage64_ops = {
+const struct kexec_file_ops kexec_bzImage64_ops = {
	.probe = bzImage64_probe,
	.load = bzImage64_load,
	.cleanup = bzImage64_cleanup,

@@ -30,8 +30,9 @@
 #include <asm/set_memory.h>
 
 #ifdef CONFIG_KEXEC_FILE
-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
+		NULL
 };
 #endif
 
@@ -364,27 +365,6 @@ void arch_crash_save_vmcoreinfo(void)
 /* arch-dependent functionality related to kexec file-based syscall */
 
 #ifdef CONFIG_KEXEC_FILE
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-				  unsigned long buf_len)
-{
-	int i, ret = -ENOEXEC;
-	struct kexec_file_ops *fops;
-
-	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
-		fops = kexec_file_loaders[i];
-		if (!fops || !fops->probe)
-			continue;
-
-		ret = fops->probe(buf, buf_len);
-		if (!ret) {
-			image->fops = fops;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
 void *arch_kexec_kernel_image_load(struct kimage *image)
 {
	vfree(image->arch.elf_headers);
@@ -399,88 +379,53 @@ void *arch_kexec_kernel_image_load(struct kimage *image)
				 image->cmdline_buf_len);
 }
 
-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	if (!image->fops || !image->fops->cleanup)
-		return 0;
-
-	return image->fops->cleanup(image->image_loader_data);
-}
-
-#ifdef CONFIG_KEXEC_VERIFY_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
-				 unsigned long kernel_len)
-{
-	if (!image->fops || !image->fops->verify_sig) {
-		pr_debug("kernel loader does not support signature verification.");
-		return -EKEYREJECTED;
-	}
-
-	return image->fops->verify_sig(kernel, kernel_len);
-}
-#endif
-
 /*
  * Apply purgatory relocations.
  *
- * ehdr: Pointer to elf headers
- * sechdrs: Pointer to section headers.
- * relsec: section index of SHT_RELA section.
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELAs.
+ * @symtabsec:	Corresponding symtab.
  *
  * TODO: Some of the code belongs to generic code. Move that in kexec.c.
  */
-int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
-				     Elf64_Shdr *sechdrs, unsigned int relsec)
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section, const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtabsec)
 {
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
-	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;
+	const Elf_Shdr *sechdrs;
 
-	/*
-	 * ->sh_offset has been modified to keep the pointer to section
-	 * contents in memory
-	 */
-	rel = (void *)sechdrs[relsec].sh_offset;
+	/* String & section header string table */
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+	strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset;
+	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
 
-	/* Section to which relocations apply */
-	section = &sechdrs[sechdrs[relsec].sh_info];
+	rel = (void *)pi->ehdr + relsec->sh_offset;
 
-	pr_debug("Applying relocate section %u to %u\n", relsec,
-		 sechdrs[relsec].sh_info);
+	pr_debug("Applying relocate section %s to %u\n",
		 shstrtab + relsec->sh_name, relsec->sh_info);
 
-	/* Associated symbol table */
-	symtabsec = &sechdrs[sechdrs[relsec].sh_link];
-
-	/* String table */
-	if (symtabsec->sh_link >= ehdr->e_shnum) {
-		/* Invalid strtab section number */
-		pr_err("Invalid string table section index %d\n",
-		       symtabsec->sh_link);
-		return -ENOEXEC;
-	}
-
-	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;
-
-	/* section header string table */
-	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;
-
-	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+	for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {
 
		/*
		 * rel[i].r_offset contains byte offset from beginning
		 * of section to the storage unit affected.
		 *
-		 * This is location to update (->sh_offset). This is temporary
-		 * buffer where section is currently loaded. This will finally
-		 * be loaded to a different address later, pointed to by
+		 * This is location to update. This is temporary buffer
+		 * where section is currently loaded. This will finally be
+		 * loaded to a different address later, pointed to by
		 * ->sh_addr. kexec takes care of moving it
		 *  (kexec_load_segment()).
		 */
-		location = (void *)(section->sh_offset + rel[i].r_offset);
+		location = pi->purgatory_buf;
+		location += section->sh_offset;
+		location += rel[i].r_offset;
 
		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;
@@ -491,8 +436,8 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
		 * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get
		 * these respectively.
		 */
-		sym = (Elf64_Sym *)symtabsec->sh_offset +
-		      ELF64_R_SYM(rel[i].r_info);
+		sym = (void *)pi->ehdr + symtabsec->sh_offset;
+		sym += ELF64_R_SYM(rel[i].r_info);
 
		if (sym->st_name)
			name = strtab + sym->st_name;
@@ -515,12 +460,12 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
 
		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
-		else if (sym->st_shndx >= ehdr->e_shnum) {
+		else if (sym->st_shndx >= pi->ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
-			sec_base = sechdrs[sym->st_shndx].sh_addr;
+			sec_base = pi->sechdrs[sym->st_shndx].sh_addr;
 
		value = sym->st_value;
		value += sec_base;

@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
+$(obj)/sha256.o: $(srctree)/lib/sha256.c
+	$(call if_changed_rule,cc_o_c)
+
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro
 

@@ -11,9 +11,9 @@
  */
 
 #include <linux/bug.h>
+#include <linux/sha256.h>
 #include <asm/purgatory.h>
 
-#include "sha256.h"
 #include "../boot/string.h"
 
 unsigned long purgatory_backup_dest __section(.kexec-purgatory);

@@ -10,4 +10,16 @@
  * Version 2. See the file COPYING for more details.
  */
 
+#include <linux/types.h>
+
 #include "../boot/string.c"
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+	return __builtin_memcpy(dst, src, len);
+}
+
+void *memset(void *dst, int c, size_t len)
+{
+	return __builtin_memset(dst, c, len);
+}

@@ -15,6 +15,7 @@
 #include <linux/stat.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/printk.h>
 #include <linux/mount.h>
@@ -217,6 +218,26 @@ void proc_free_inum(unsigned int inum)
	ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
 }
 
+static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0)
+		return 0; /* revalidate */
+	return 1;
+}
+
+static int proc_misc_d_delete(const struct dentry *dentry)
+{
+	return atomic_read(&PDE(d_inode(dentry))->in_use) < 0;
+}
+
+static const struct dentry_operations proc_misc_dentry_ops = {
+	.d_revalidate	= proc_misc_d_revalidate,
+	.d_delete	= proc_misc_d_delete,
+};
+
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
@@ -234,7 +255,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
		inode = proc_get_inode(dir->i_sb, de);
		if (!inode)
			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &simple_dentry_operations);
+		d_set_d_op(dentry, &proc_misc_dentry_ops);
		d_add(dentry, inode);
		return NULL;
	}

@@ -99,21 +99,25 @@ struct compat_kexec_segment {
 
 #ifdef CONFIG_KEXEC_FILE
 struct purgatory_info {
-	/* Pointer to elf header of read only purgatory */
-	Elf_Ehdr *ehdr;
-
-	/* Pointer to purgatory sechdrs which are modifiable */
+	/*
+	 * Pointer to elf header at the beginning of kexec_purgatory.
+	 * Note: kexec_purgatory is read only
+	 */
+	const Elf_Ehdr *ehdr;
+	/*
+	 * Temporary, modifiable buffer for sechdrs used for relocation.
+	 * This memory can be freed post image load.
+	 */
	Elf_Shdr *sechdrs;
	/*
-	 * Temporary buffer location where purgatory is loaded and relocated
-	 * This memory can be freed post image load
+	 * Temporary, modifiable buffer for stripped purgatory used for
+	 * relocation. This memory can be freed post image load.
	 */
	void *purgatory_buf;
-
-	/* Address where purgatory is finally loaded and is executed from */
-	unsigned long purgatory_load_addr;
 };
 
 struct kimage;
 
 typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
 typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
			     unsigned long kernel_len, char *initrd,
@@ -135,6 +139,11 @@ struct kexec_file_ops {
 #endif
 };
 
+extern const struct kexec_file_ops * const kexec_file_loaders[];
+
+int kexec_image_probe_default(struct kimage *image, void *buf,
+			      unsigned long buf_len);
+
 /**
  * struct kexec_buf - parameters for finding a place for a buffer in memory
  * @image: kexec image in which memory to search.
@@ -159,10 +168,44 @@ struct kexec_buf {
	bool top_down;
 };
 
+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
+int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
+				   void *buf, unsigned int size,
+				   bool get_value);
+void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+
+int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+					    Elf_Shdr *section,
+					    const Elf_Shdr *relsec,
+					    const Elf_Shdr *symtab);
+int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
+					Elf_Shdr *section,
+					const Elf_Shdr *relsec,
+					const Elf_Shdr *symtab);
+
 int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *));
 extern int kexec_add_buffer(struct kexec_buf *kbuf);
 int kexec_locate_mem_hole(struct kexec_buf *kbuf);
 
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN	4096
+
+struct crash_mem_range {
+	u64 start, end;
+};
+
+struct crash_mem {
+	unsigned int max_nr_ranges;
+	unsigned int nr_ranges;
+	struct crash_mem_range ranges[0];
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+				   unsigned long long mstart,
+				   unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+				       void **addr, unsigned long *sz);
 #endif /* CONFIG_KEXEC_FILE */
 
 struct kimage {
@@ -209,7 +252,7 @@ struct kimage {
	unsigned long cmdline_buf_len;
 
	/* File operations provided by image loader */
-	struct kexec_file_ops *fops;
+	const struct kexec_file_ops *fops;
 
	/* Image loader handling the kernel can store a pointer here */
	void *image_loader_data;
@@ -226,14 +269,6 @@ extern void machine_kexec_cleanup(struct kimage *image);
 extern int kernel_kexec(void);
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
					       unsigned int order);
-extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
-				unsigned long max, int top_down,
-				unsigned long *load_addr);
-extern int kexec_purgatory_get_set_symbol(struct kimage *image,
-					  const char *name, void *buf,
-					  unsigned int size, bool get_value);
-extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
-					     const char *name);
 extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
@@ -273,16 +308,6 @@ int crash_shrink_memory(unsigned long new_size);
 size_t crash_get_memory_size(void);
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
 
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-					 unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
-					unsigned long buf_len);
-int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
-					    Elf_Shdr *sechdrs, unsigned int relsec);
-int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-					unsigned int relsec);
 void arch_kexec_protect_crashkres(void);
 void arch_kexec_unprotect_crashkres(void);
 

@@ -13,9 +13,18 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 
+/*
+ * Stand-alone implementation of the SHA256 algorithm. It is designed to
+ * have as little dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should use the implementation in
+ * crypto/.
+ *
+ * For details see lib/sha256.c
+ */
+
 extern int sha256_init(struct sha256_state *sctx);
 extern int sha256_update(struct sha256_state *sctx, const u8 *input,
-			unsigned int length);
+			 unsigned int length);
 extern int sha256_final(struct sha256_state *sctx, u8 *hash);
 
 #endif /* SHA256_H */

ipc/shm.c
@@ -225,6 +225,12 @@ static int __shm_open(struct vm_area_struct *vma)
	if (IS_ERR(shp))
		return PTR_ERR(shp);
 
+	if (shp->shm_file != sfd->file) {
+		/* ID was reused */
+		shm_unlock(shp);
+		return -EINVAL;
+	}
+
	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
@@ -455,8 +461,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
	int ret;
 
	/*
-	 * In case of remap_file_pages() emulation, the file can represent
-	 * removed IPC ID: propogate shm_lock() error to caller.
+	 * In case of remap_file_pages() emulation, the file can represent an
+	 * IPC ID that was removed, and possibly even reused by another shm
+	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
@@ -480,6 +487,7 @@ static int shm_release(struct inode *ino, struct file *file)
	struct shm_file_data *sfd = shm_file_data(file);
 
	put_ipc_ns(sfd->ns);
+	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
@@ -1445,7 +1453,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
-	sfd->file = shp->shm_file;
+	/*
+	 * We need to take a reference to the real shm file to prevent the
+	 * pointer from becoming stale in cases where the lifetime of the outer
+	 * file extends beyond that of the shm segment. It's not usually
+	 * possible, but it can happen during remap_file_pages() emulation as
+	 * that unmaps the memory, then does ->mmap() via file reference only.
+	 * We'll deny the ->mmap() if the shm segment was since removed, but to
+	 * detect shm ID reuse we need to compare the file pointers.
+	 */
+	sfd->file = get_file(shp->shm_file);
	sfd->vm_ops = NULL;
 
	err = security_mmap_file(file, prot, flags);

@@ -454,6 +454,7 @@ static int __init crash_save_vmcoreinfo_init(void)
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
+	VMCOREINFO_NUMBER(PG_swapbacked);
	VMCOREINFO_NUMBER(PG_slab);
 #ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);

@@ -22,50 +22,123 @@
 #include <linux/ima.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/vmalloc.h>
 #include "kexec_internal.h"
 
 static int kexec_calculate_store_digests(struct kimage *image);
 
+/*
+ * Currently this is the only default function that is exported as some
+ * architectures need it to do additional handlings.
+ * In the future, other default functions may be exported too if required.
+ */
+int kexec_image_probe_default(struct kimage *image, void *buf,
+			      unsigned long buf_len)
+{
+	const struct kexec_file_ops * const *fops;
+	int ret = -ENOEXEC;
+
+	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
+		ret = (*fops)->probe(buf, buf_len);
+		if (!ret) {
+			image->fops = *fops;
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 /* Architectures can provide this probe function */
 int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
 {
-	return -ENOEXEC;
+	return kexec_image_probe_default(image, buf, buf_len);
 }
 
+static void *kexec_image_load_default(struct kimage *image)
+{
+	if (!image->fops || !image->fops->load)
+		return ERR_PTR(-ENOEXEC);
+
+	return image->fops->load(image, image->kernel_buf,
+				 image->kernel_buf_len, image->initrd_buf,
+				 image->initrd_buf_len, image->cmdline_buf,
+				 image->cmdline_buf_len);
+}
+
 void * __weak arch_kexec_kernel_image_load(struct kimage *image)
 {
-	return ERR_PTR(-ENOEXEC);
+	return kexec_image_load_default(image);
 }
 
+static int kexec_image_post_load_cleanup_default(struct kimage *image)
+{
+	if (!image->fops || !image->fops->cleanup)
+		return 0;
+
+	return image->fops->cleanup(image->image_loader_data);
+}
+
 int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
 {
-	return -EINVAL;
+	return kexec_image_post_load_cleanup_default(image);
 }
 
 #ifdef CONFIG_KEXEC_VERIFY_SIG
+static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
+					  unsigned long buf_len)
+{
+	if (!image->fops || !image->fops->verify_sig) {
+		pr_debug("kernel loader does not support signature verification.\n");
+		return -EKEYREJECTED;
+	}
+
+	return image->fops->verify_sig(buf, buf_len);
+}
+
 int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
 {
-	return -EKEYREJECTED;
+	return kexec_image_verify_sig_default(image, buf, buf_len);
 }
 #endif
 
-/* Apply relocations of type RELA */
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELAs.
+ * @symtab:	Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
 int __weak
-arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-				 unsigned int relsec)
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
 {
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
 }
 
-/* Apply relocations of type REL */
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELs.
+ * @symtab:	Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
 int __weak
-arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-			     unsigned int relsec)
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
 {
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
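The hunk above establishes the pattern used throughout this series: each arch hook gets a generic default in kexec_file.c plus a __weak fallback that calls it, so an architecture overrides behavior simply by defining the strong symbol. A hedged illustration of an override, modeled on the powerpc hunk earlier in this commit:

	/* Sketch: a strong arch definition wins over the __weak default
	 * above. This mirrors the powerpc version shown earlier. */
	int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					  unsigned long buf_len)
	{
		/* Arch-specific filtering first... */
		if (image->type == KEXEC_TYPE_CRASH)
			return -EOPNOTSUPP;

		/* ...then fall back to the exported generic default. */
		return kexec_image_probe_default(image, buf, buf_len);
	}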
@@ -532,6 +605,9 @@ static int kexec_calculate_store_digests(struct kimage *image)
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;
 
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
+		return 0;
+
	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;
 
@@ -633,87 +709,29 @@ static int kexec_calculate_store_digests(struct kimage *image)
	return ret;
 }
 
-/* Actually load purgatory. Lot of code taken from kexec-tools */
-static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
-				  unsigned long max, int top_down)
+#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
+/*
+ * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
+ * @pi:		Purgatory to be loaded.
+ * @kbuf:	Buffer to setup.
+ *
+ * Allocates the memory needed for the buffer. Caller is responsible to free
+ * the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
+				      struct kexec_buf *kbuf)
 {
-	struct purgatory_info *pi = &image->purgatory_info;
-	unsigned long align, bss_align, bss_sz, bss_pad;
-	unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
-	unsigned char *buf_addr, *src;
-	int i, ret = 0, entry_sidx = -1;
-	const Elf_Shdr *sechdrs_c;
-	Elf_Shdr *sechdrs = NULL;
-	struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
-				  .buf_min = min, .buf_max = max,
-				  .top_down = top_down };
+	const Elf_Shdr *sechdrs;
+	unsigned long bss_align;
+	unsigned long bss_sz;
+	unsigned long align;
+	int i, ret;
 
-	/*
-	 * sechdrs_c points to section headers in purgatory and are read
-	 * only. No modifications allowed.
-	 */
-	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;
-
-	/*
-	 * We can not modify sechdrs_c[] and its fields. It is read only.
-	 * Copy it over to a local copy where one can store some temporary
-	 * data and free it at the end. We need to modify ->sh_addr and
-	 * ->sh_offset fields to keep track of permanent and temporary
-	 * locations of sections.
-	 */
-	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
-	if (!sechdrs)
-		return -ENOMEM;
-
-	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));
-
-	/*
-	 * We seem to have multiple copies of sections. First copy is which
-	 * is embedded in kernel in read only section. Some of these sections
-	 * will be copied to a temporary buffer and relocated. And these
-	 * sections will finally be copied to their final destination at
-	 * segment load time.
-	 *
-	 * Use ->sh_offset to reflect section address in memory. It will
-	 * point to original read only copy if section is not allocatable.
-	 * Otherwise it will point to temporary copy which will be relocated.
-	 *
-	 * Use ->sh_addr to contain final address of the section where it
-	 * will go during execution time.
-	 */
-	for (i = 0; i < pi->ehdr->e_shnum; i++) {
-		if (sechdrs[i].sh_type == SHT_NOBITS)
-			continue;
-
-		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
-						sechdrs[i].sh_offset;
-	}
-
-	/*
-	 * Identify entry point section and make entry relative to section
-	 * start.
-	 */
-	entry = pi->ehdr->e_entry;
-	for (i = 0; i < pi->ehdr->e_shnum; i++) {
-		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
-			continue;
-
-		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
-			continue;
-
-		/* Make entry section relative */
-		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
-		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
-		     pi->ehdr->e_entry)) {
-			entry_sidx = i;
-			entry -= sechdrs[i].sh_addr;
-			break;
-		}
-	}
-
	/* Determine how much memory is needed to load relocatable object. */
-	bss_align = 1;
-	bss_sz = 0;
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+	kbuf->buf_align = bss_align = 1;
+	kbuf->bufsz = bss_sz = 0;
 
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -721,111 +739,124 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
 
		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
-			if (kbuf.buf_align < align)
-				kbuf.buf_align = align;
-			kbuf.bufsz = ALIGN(kbuf.bufsz, align);
-			kbuf.bufsz += sechdrs[i].sh_size;
+			if (kbuf->buf_align < align)
+				kbuf->buf_align = align;
+			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
+			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
+	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
+	kbuf->memsz = kbuf->bufsz + bss_sz;
+	if (kbuf->buf_align < bss_align)
+		kbuf->buf_align = bss_align;
 
-	/* Determine the bss padding required to align bss properly */
-	bss_pad = 0;
-	if (kbuf.bufsz & (bss_align - 1))
-		bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));
+	kbuf->buffer = vzalloc(kbuf->bufsz);
+	if (!kbuf->buffer)
+		return -ENOMEM;
+	pi->purgatory_buf = kbuf->buffer;
 
-	kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;
-
-	/* Allocate buffer for purgatory */
-	kbuf.buffer = vzalloc(kbuf.bufsz);
-	if (!kbuf.buffer) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (kbuf.buf_align < bss_align)
-		kbuf.buf_align = bss_align;
-
-	/* Add buffer to segment list */
-	ret = kexec_add_buffer(&kbuf);
+	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;
-	pi->purgatory_load_addr = kbuf.mem;
 
-	/* Load SHF_ALLOC sections */
-	buf_addr = kbuf.buffer;
-	load_addr = curr_load_addr = pi->purgatory_load_addr;
-	bss_addr = load_addr + kbuf.bufsz + bss_pad;
+	return 0;
+out:
+	vfree(pi->purgatory_buf);
+	pi->purgatory_buf = NULL;
+	return ret;
+}
+
+/*
+ * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
+ * @pi:		Purgatory to be loaded.
+ * @kbuf:	Buffer prepared to store purgatory.
+ *
+ * Allocates the memory needed for the buffer. Caller is responsible to free
+ * the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
+					 struct kexec_buf *kbuf)
+{
+	unsigned long bss_addr;
+	unsigned long offset;
+	Elf_Shdr *sechdrs;
+	int i;
+
+	/*
+	 * The section headers in kexec_purgatory are read-only. In order to
+	 * have them modifiable make a temporary copy.
+	 */
+	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+	if (!sechdrs)
+		return -ENOMEM;
+	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
+	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+	pi->sechdrs = sechdrs;
+
+	offset = 0;
+	bss_addr = kbuf->mem + kbuf->bufsz;
+	kbuf->image->start = pi->ehdr->e_entry;
 
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
+		unsigned long align;
+		void *src, *dst;
+
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;
 
		align = sechdrs[i].sh_addralign;
-		if (sechdrs[i].sh_type != SHT_NOBITS) {
-			curr_load_addr = ALIGN(curr_load_addr, align);
-			offset = curr_load_addr - load_addr;
-			/* We already modifed ->sh_offset to keep src addr */
-			src = (char *) sechdrs[i].sh_offset;
-			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);
-
-			/* Store load address and source address of section */
-			sechdrs[i].sh_addr = curr_load_addr;
-
-			/*
-			 * This section got copied to temporary buffer. Update
-			 * ->sh_offset accordingly.
-			 */
-			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);
-
-			/* Advance to the next address */
-			curr_load_addr += sechdrs[i].sh_size;
-		} else {
+		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
+			continue;
		}
+
+		offset = ALIGN(offset, align);
+		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
+		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
+		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
+					 + sechdrs[i].sh_size)) {
+			kbuf->image->start -= sechdrs[i].sh_addr;
+			kbuf->image->start += kbuf->mem + offset;
+		}
+
+		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
+		dst = pi->purgatory_buf + offset;
+		memcpy(dst, src, sechdrs[i].sh_size);
+
+		sechdrs[i].sh_addr = kbuf->mem + offset;
+		sechdrs[i].sh_offset = offset;
+		offset += sechdrs[i].sh_size;
	}
 
-	/* Update entry point based on load address of text section */
-	if (entry_sidx >= 0)
-		entry += sechdrs[entry_sidx].sh_addr;
-
-	/* Make kernel jump to purgatory after shutdown */
-	image->start = entry;
-
-	/* Used later to get/set symbol values */
-	pi->sechdrs = sechdrs;
-
-	/*
-	 * Used later to identify which section is purgatory and skip it
-	 * from checksumming.
-	 */
-	pi->purgatory_buf = kbuf.buffer;
-	return ret;
-out:
-	vfree(sechdrs);
-	vfree(kbuf.buffer);
-	return ret;
+	return 0;
 }
 
 static int kexec_apply_relocations(struct kimage *image)
 {
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
-	Elf_Shdr *sechdrs = pi->sechdrs;
+	const Elf_Shdr *sechdrs;
 
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+
-	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
-		Elf_Shdr *section, *symtab;
+		const Elf_Shdr *relsec;
+		const Elf_Shdr *symtab;
+		Elf_Shdr *section;
+
+		relsec = sechdrs + i;
 
-		if (sechdrs[i].sh_type != SHT_RELA &&
-		    sechdrs[i].sh_type != SHT_REL)
+		if (relsec->sh_type != SHT_RELA &&
+		    relsec->sh_type != SHT_REL)
			continue;
 
		/*
@@ -834,12 +865,12 @@ static int kexec_apply_relocations(struct kimage *image)
		 * symbol table. And ->sh_info contains section header
		 * index of section to which relocations apply.
		 */
-		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
-		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
+		if (relsec->sh_info >= pi->ehdr->e_shnum ||
+		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;
 
-		section = &sechdrs[sechdrs[i].sh_info];
-		symtab = &sechdrs[sechdrs[i].sh_link];
+		section = pi->sechdrs + relsec->sh_info;
+		symtab = sechdrs + relsec->sh_link;
 
		if (!(section->sh_flags & SHF_ALLOC))
			continue;
@@ -856,12 +887,12 @@ static int kexec_apply_relocations(struct kimage *image)
		 * Respective architecture needs to provide support for applying
		 * relocations of type SHT_RELA/SHT_REL.
		 */
-		if (sechdrs[i].sh_type == SHT_RELA)
-			ret = arch_kexec_apply_relocations_add(pi->ehdr,
-							       sechdrs, i);
-		else if (sechdrs[i].sh_type == SHT_REL)
-			ret = arch_kexec_apply_relocations(pi->ehdr,
-							   sechdrs, i);
+		if (relsec->sh_type == SHT_RELA)
+			ret = arch_kexec_apply_relocations_add(pi, section,
+							       relsec, symtab);
+		else if (relsec->sh_type == SHT_REL)
+			ret = arch_kexec_apply_relocations(pi, section,
+							   relsec, symtab);
		if (ret)
			return ret;
	}
@ -869,10 +900,18 @@ static int kexec_apply_relocations(struct kimage *image)
|
|||
return 0;
|
||||
}
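The relocation pass now hands the architecture the purgatory_info plus the relevant section, relocation section, and symbol table instead of the raw (ehdr, sechdrs, index) triple. A sketch of the weak fallbacks this interface implies; the bodies are inferred from the signatures in the hunk above, not copied from the tree:

int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
					    Elf_Shdr *section,
					    const Elf_Shdr *relsec,
					    const Elf_Shdr *symtab)
{
	/* overridden by architectures that support RELA in purgatory */
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
					Elf_Shdr *section,
					const Elf_Shdr *relsec,
					const Elf_Shdr *symtab)
{
	/* overridden by architectures that support REL in purgatory */
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}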

/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
/*
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:	Image to add the purgatory to.
 * @kbuf:	Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. Caller is responsible
 * to free the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

@ -880,55 +919,51 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;
	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
		kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;
	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

out_free_kbuf:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
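With the new signature the purgatory load address comes back through the kexec_buf rather than a separate *load_addr out-parameter. A hedged caller-side sketch of an arch loader fragment; min and max are placeholders for the architecture's address limits:

	struct kexec_buf pbuf = { .image = image, .buf_min = min,
				  .buf_max = max, .top_down = true };
	unsigned long purgatory_load_addr;
	int ret;

	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed.\n");
		return ret;
	}
	/* kexec_purgatory_setup_kbuf() recorded the chosen address here */
	purgatory_load_addr = pbuf.mem;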

static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
/*
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:		Purgatory to search in.
 * @name:	Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
						  const char *name)
{
	Elf_Sym *syms;
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	int i, k;
	const Elf_Shdr *sechdrs;
	const Elf_Ehdr *ehdr;
	const Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->sechdrs || !pi->ehdr)
	if (!pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;
	sechdrs = (void *)ehdr + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)

@ -937,8 +972,8 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;
		strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (void *)ehdr + sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {

@ -966,7 +1001,7 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	const Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);

@ -989,9 +1024,9 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sec;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);

@ -1004,16 +1039,15 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;
	sec = pi->sechdrs + sym->st_shndx;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
	if (sec->sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
		  sym->st_value;
	sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);

@ -1022,3 +1056,174 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,

	return 0;
}
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
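Typical use of the get/set helper is to patch variables inside the loaded purgatory before it is checksummed. A sketch; the symbol name "purgatory_sha256_digest" follows the x86 purgatory of this era and is used purely as an illustration:

	u8 digest[SHA256_DIGEST_SIZE];
	int ret;

	/* ... compute digest over the loaded segments ... */

	/* write the digest into the purgatory's copy of the variable */
	ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
					     digest, sizeof(digest),
					     false /* set, don't get */);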

int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
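Since range ends are inclusive, excluding the interior of a range trims the left piece to mstart - 1 and starts the new right piece at mend + 1. A worked example with invented addresses:

	/*
	 * before: ranges[0] = { .start = 0x1000, .end = 0x8fff }
	 *
	 * call:   crash_exclude_mem_range(mem, 0x3000, 0x3fff);
	 *
	 * after:  ranges[0] = { .start = 0x1000, .end = 0x2fff }
	 *         ranges[1] = { .start = 0x4000, .end = 0x8fff }
	 *
	 * nr_ranges grows by one; if the array is already full
	 * (i == max_nr_ranges - 1), the split fails with -ENOMEM.
	 */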

int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		phdr++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
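A hedged sketch of how an architecture is expected to drive these two helpers together when building crashdump ELF headers, mirroring the pattern this series introduces on x86; fill_up_crash_mem_ranges() is a hypothetical helper:

static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_mem_ranges();	/* hypothetical */
	if (!cmem)
		return -ENOMEM;

	/* Exclude the memory reserved for the crash kernel itself */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (!ret)
		ret = crash_prepare_elf64_headers(cmem, 1 /* kernel_map */,
						  addr, sz);
	vfree(cmem);
	return ret;
}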


@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old,
		alloc.start = constraint->alignf(constraint->alignf_data, &avail,
				size, constraint->align);
		alloc.end = alloc.start + size - 1;
		if (resource_contains(&avail, &alloc)) {
		if (alloc.start <= alloc.end &&
		    resource_contains(&avail, &alloc)) {
			new->start = alloc.start;
			new->end = alloc.end;
			return 0;
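The added alloc.start <= alloc.end test guards against arithmetic wraparound in the end computation just above it. With invented numbers:

	/*
	 * alloc.start = 0xfffffffffffffff0
	 * size        = 0x20                   (caller-supplied)
	 * alloc.end   = alloc.start + size - 1 = wraps to 0xf
	 *
	 * The inverted range (end < start) could still satisfy
	 * resource_contains(&avail, &alloc), so it is rejected
	 * explicitly before the resource is handed out.
	 */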


@ -16,9 +16,9 @@
 */

#include <linux/bitops.h>
#include <linux/sha256.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include "sha256.h"
#include "../boot/string.h"

static inline u32 Ch(u32 x, u32 y, u32 z)
{


@ -2719,7 +2719,6 @@ int filemap_page_mkwrite(struct vm_fault *vmf)
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,

@ -2750,6 +2749,10 @@ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
	return generic_file_mmap(file, vma);
}
#else
int filemap_page_mkwrite(struct vm_fault *vmf)
{
	return -ENOSYS;
}
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;

@ -2760,6 +2763,7 @@ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

mm/gup.c

@ -1740,7 +1740,9 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 * the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)

@ -1806,9 +1808,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
				(void __user *)start, len)))
		return 0;
		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
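Because get_user_pages_fast() can now fail with -EFAULT instead of silently returning 0, callers should distinguish an error from zero pinned pages. An illustrative fragment; undo_partial_pin() is a hypothetical cleanup helper:

	int nr;

	nr = get_user_pages_fast(addr, nr_pages, 1 /* write */, pages);
	if (nr < 0)
		return nr;		/* e.g. -EFAULT for a bogus address */
	if (nr < nr_pages)
		undo_partial_pin(pages, nr);	/* hypothetical */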


@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
	struct page **pages;

	nr_pages = gup->size / PAGE_SIZE;
	pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
	pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

@ -41,6 +41,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
	}

	nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
	if (nr <= 0)
		break;
	i += nr;
	}
	end_time = ktime_get();


@ -4086,7 +4086,8 @@ static void cache_reap(struct work_struct *w)
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
	schedule_delayed_work_on(smp_processor_id(), work,
				 round_jiffies_relative(REAPTIMEOUT_AC));
}

void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
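schedule_delayed_work_on() re-queues the work on an explicit CPU, so a per-CPU scan cannot drift to another CPU via an unbound workqueue. A minimal sketch of the pattern; illustrative, not the slab code itself:

static void my_reap(struct work_struct *w)
{
	struct delayed_work *work = to_delayed_work(w);

	/* ... do per-CPU scanning work ... */

	/* re-arm on this CPU instead of using schedule_delayed_work() */
	schedule_delayed_work_on(smp_processor_id(), work,
				 round_jiffies_relative(2 * HZ));
}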


@ -297,8 +297,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
/*
 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture not support this function, simply return with no
 * page pinned
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
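The hunk only shows the prototype; the weak default body the comment documents plausibly reads as follows (a sketch consistent with the comment, not quoted from the tree):

int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	/* no architecture support: zero pages pinned, never an error */
	return 0;
}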