riscv: Add KASAN support
This patch ports the Kernel Address SANitizer (KASAN) feature to RISC-V.

Note: the start address of the shadow memory is at the beginning of the
kernel space, which is 2^64 - (2^39 / 2) in SV39. The size of the kernel
space is 2^38 bytes, so the size of the shadow memory is 2^38 / 8 bytes.
The shadow memory therefore does not overlap with the fixmap area.

There are currently two limitations in this port:

1. RV64 only: KASAN needs a large address space for the extra shadow
   memory region.

2. KASAN can't debug modules, since modules are allocated in the VMALLOC
   area. The shadow memory corresponding to the VMALLOC area is mapped to
   kasan_early_shadow_page, because there is not enough physical space
   for all of the shadow memory corresponding to the VMALLOC area.

Signed-off-by: Nick Hu <nickhu@andestech.com>
Reported-by: Greentime Hu <green.hu@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
commit 8ad8b72721
parent 57ee58e393
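As a sanity check on the layout described above, the following stand-alone
user-space sketch (not part of the patch) mirrors the constants from the new
asm/kasan.h together with the generic kasan_mem_to_shadow() formula, and
confirms that the lowest kernel address maps to KASAN_SHADOW_START and the
top of the address space to one byte below KASAN_SHADOW_END:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3
    #define KASAN_SHADOW_SIZE   (1ULL << (38 - KASAN_SHADOW_SCALE_SHIFT))
    #define KASAN_SHADOW_START  0xffffffc000000000ULL /* 2^64 - 2^38 */
    #define KASAN_SHADOW_END    (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
    #define KASAN_SHADOW_OFFSET \
            (KASAN_SHADOW_END - (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)))

    /* Same formula as the generic kasan_mem_to_shadow() helper. */
    static uint64_t mem_to_shadow(uint64_t addr)
    {
            return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
            /* The shadow (2^35 bytes) sits at the very bottom of the
             * 2^38-byte kernel space, so it cannot overlap the fixmap
             * at the top. */
            assert(mem_to_shadow(KASAN_SHADOW_START) == KASAN_SHADOW_START);
            assert(mem_to_shadow(UINT64_MAX) == KASAN_SHADOW_END - 1);
            printf("KASAN_SHADOW_OFFSET = 0x%016llx\n",
                   (unsigned long long)KASAN_SHADOW_OFFSET);
            return 0;
    }

Running it prints KASAN_SHADOW_OFFSET = 0xdfffffc800000000, the value the
header's expression evaluates to.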
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -66,6 +66,7 @@ config RISCV
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select HAVE_COPY_THREAD_TLS
+	select HAVE_ARCH_KASAN if MMU && 64BIT
 
 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT
--- /dev/null
+++ b/arch/riscv/include/asm/kasan.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Andes Technology Corporation */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <asm/pgtable.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT	3
+
+#define KASAN_SHADOW_SIZE	(UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START	0xffffffc000000000 /* 2^64 - 2^38 */
+#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
+					(64 - KASAN_SHADOW_SCALE_SHIFT)))
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
+#endif /* __ASM_KASAN_H */
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -58,6 +58,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
 	return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+static inline struct page *pud_page(pud_t pud)
+{
+	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
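pud_page() is added here because the shadow-population code walks page
tables through the full set of helpers, which apparently was incomplete on
riscv before this patch. As a hedged aside, the helper reduces to PFN
extraction plus an index into the struct page array; a toy sketch of the
arithmetic, using RISC-V's PTE layout (PFN starting at bit 10):

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PFN_SHIFT 10  /* bits 0-9 of a RISC-V PTE are flags */

    /* What pud_page() boils down to before the pfn_to_page() lookup. */
    static uint64_t pud_val_to_pfn(uint64_t pud_val)
    {
            return pud_val >> _PAGE_PFN_SHIFT;
    }

    int main(void)
    {
            /* Hypothetical entry: PFN 0x80000 with the valid bit set. */
            uint64_t pud = (0x80000ULL << _PAGE_PFN_SHIFT) | 0x1;
            printf("pfn = 0x%llx\n", (unsigned long long)pud_val_to_pfn(pud));
            return 0;
    }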
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -11,8 +11,17 @@
 
 #define __HAVE_ARCH_MEMSET
 extern asmlinkage void *memset(void *, int, size_t);
+extern asmlinkage void *__memset(void *, int, size_t);
 
 #define __HAVE_ARCH_MEMCPY
 extern asmlinkage void *memcpy(void *, const void *, size_t);
+extern asmlinkage void *__memcpy(void *, const void *, size_t);
+
+/* For those files which don't want to check by kasan. */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#endif
 
 #endif /* _ASM_RISCV_STRING_H */
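The redirection in the hunk above matters for files that are built with
CONFIG_KASAN=y but are themselves excluded from instrumentation: the compiler
defines __SANITIZE_ADDRESS__ only for units actually compiled with the
sanitizer. A hedged illustration, with a hypothetical translation unit that
was excluded via KASAN_SANITIZE_foo.o := n:

    /* Hypothetical uninstrumented file under CONFIG_KASAN=y:
     * __SANITIZE_ADDRESS__ is not defined here, so the asm/string.h
     * macro redirection is active. */
    #include <asm/string.h>

    static char early_buf[64];

    void early_clear(void)
    {
            /* Expands to __memset(early_buf, 0, sizeof(early_buf)), the
             * raw assembly routine, so no shadow checks can run before
             * kasan_init() has set up real shadow pages. */
            memset(early_buf, 0, sizeof(early_buf));
    }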
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -121,6 +121,9 @@ clear_bss_done:
 	sw zero, TASK_TI_CPU(tp)
 	la sp, init_thread_union + THREAD_SIZE
 
+#ifdef CONFIG_KASAN
+	call kasan_early_init
+#endif
 	/* Start the kernel */
 	call parse_dtb
 	tail start_kernel
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -11,3 +11,5 @@
  */
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -24,6 +24,7 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
+#include <asm/kasan.h>
 
 #include "head.h"
 
@@ -74,6 +75,10 @@ void __init setup_arch(char **cmdline_p)
 	swiotlb_init(1);
 #endif
 
+#ifdef CONFIG_KASAN
+	kasan_init();
+#endif
+
 #ifdef CONFIG_SMP
 	setup_smp();
 #endif
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@ SECTIONS
 		KPROBES_TEXT
 		ENTRY_TEXT
 		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
 		*(.fixup)
 		_etext = .;
 	}
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -7,7 +7,8 @@
 #include <asm/asm.h>
 
 /* void *memcpy(void *, const void *, size_t) */
-ENTRY(memcpy)
+ENTRY(__memcpy)
+WEAK(memcpy)
 	move t6, a0  /* Preserve return value */
 
 	/* Defer to byte-oriented copy for small sizes */
@@ -104,4 +105,4 @@
 	bltu a1, a3, 5b
 6:
 	ret
-END(memcpy)
+END(__memcpy)
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -8,7 +8,8 @@
 #include <asm/asm.h>
 
 /* void *memset(void *, int, size_t) */
-ENTRY(memset)
+ENTRY(__memset)
+WEAK(memset)
 	move t0, a0  /* Preserve return value */
 
 	/* Defer to byte-oriented fill for small sizes */
@@ -109,4 +110,4 @@
 	bltu t0, a3, 5b
 6:
 	ret
-END(memset)
+END(__memset)
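The ENTRY()/WEAK() change in the two assembly hunks above is what lets KASAN
interpose on string operations: the assembly routines now carry the strong
names __memcpy/__memset plus weak memcpy/memset aliases. Roughly, generic
KASAN code can then supply strong, instrumented C definitions that win at
link time and fall through to the raw routines. A sketch of that arrangement
(simplified, not the kernel's exact code):

    #include <stddef.h>

    /* Raw, uninstrumented copy: the strong symbol from memcpy.S. */
    void *__memcpy(void *dst, const void *src, size_t n);

    /* A strong C definition like this (KASAN's generic code provides
     * the real one) overrides the weak assembly "memcpy". */
    void *memcpy(void *dst, const void *src, size_t n)
    {
            /* KASAN would validate [src, src+n) and [dst, dst+n)
             * against the shadow here, then delegate to the raw copy. */
            return __memcpy(dst, src, n);
    }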
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -15,3 +15,9 @@ ifeq ($(CONFIG_MMU),y)
 obj-$(CONFIG_SMP) += tlbflush.o
 endif
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_init.o := n
+endif
--- /dev/null
+++ b/arch/riscv/mm/kasan_init.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Andes Technology Corporation
+
+#include <linux/pfn.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+
+extern pgd_t early_pg_dir[PTRS_PER_PGD];
+asmlinkage void __init kasan_early_init(void)
+{
+	uintptr_t i;
+	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_early_shadow_pte + i,
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+			PAGE_KERNEL));
+
+	for (i = 0; i < PTRS_PER_PMD; ++i)
+		set_pmd(kasan_early_shadow_pmd + i,
+		 pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
+			__pgprot(_PAGE_TABLE)));
+
+	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+	     i += PGDIR_SIZE, ++pgd)
+		set_pgd(pgd,
+		 pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+			__pgprot(_PAGE_TABLE)));
+
+	/* init for swapper_pg_dir */
+	pgd = pgd_offset_k(KASAN_SHADOW_START);
+
+	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+	     i += PGDIR_SIZE, ++pgd)
+		set_pgd(pgd,
+		 pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+			__pgprot(_PAGE_TABLE)));
+
+	flush_tlb_all();
+}
+
+static void __init populate(void *start, void *end)
+{
+	unsigned long i;
+	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+	unsigned long vend = PAGE_ALIGN((unsigned long)end);
+	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+	unsigned long n_pmds =
+		(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
+						n_pages / PTRS_PER_PTE;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
+	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	for (i = 0; i < n_pages; i++) {
+		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+		set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+	}
+
+	for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
+		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+				__pgprot(_PAGE_TABLE)));
+
+	for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
+		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+				__pgprot(_PAGE_TABLE)));
+
+	flush_tlb_all();
+	memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+	struct memblock_region *reg;
+	unsigned long i;
+
+	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	for_each_memblock(memory, reg) {
+		void *start = (void *)__va(reg->base);
+		void *end = (void *)__va(reg->base + reg->size);
+
+		if (start >= end)
+			break;
+
+		populate(kasan_mem_to_shadow(start),
+			 kasan_mem_to_shadow(end));
+	};
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(&kasan_early_shadow_pte[i],
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+
+	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+	init_task.kasan_depth = 0;
+}
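For a feel for populate()'s sizing math above: the shadow for a region is
one-eighth of its size, one PTE page (PTRS_PER_PTE = 512 entries) maps 2 MiB
of shadow, and n_pmds is the PTE-page count rounded up. A stand-alone check
for a hypothetical 1 GiB memblock region:

    #include <stdio.h>

    #define PAGE_SIZE    4096UL
    #define PTRS_PER_PTE 512UL

    int main(void)
    {
            unsigned long region  = 1UL << 30;          /* 1 GiB of memory */
            unsigned long shadow  = region >> 3;        /* 128 MiB of shadow */
            unsigned long n_pages = shadow / PAGE_SIZE; /* 32768 PTEs */
            /* Same round-up populate() performs with its modulo test. */
            unsigned long n_pmds  =
                    (n_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

            printf("shadow=%lu MiB, pte entries=%lu, pmd entries=%lu\n",
                   shadow >> 20, n_pages, n_pmds);      /* 128, 32768, 64 */
            return 0;
    }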