diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ccc7087d3c4e..a62cd077457b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
  *  virt_addr_valid(k)	indicates whether a virtual address is valid
  */
-#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
+#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define	virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 619b1dd7bcde..d18a44940968 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
 	b.eq	efi_load_fail
 
 	/*
-	 * efi_entry() will have relocated the kernel image if necessary
-	 * and we return here with device tree address in x0 and the kernel
-	 * entry point stored at *image_addr. Save those values in registers
-	 * which are callee preserved.
+	 * efi_entry() will have copied the kernel image if necessary and we
+	 * return here with device tree address in x0 and the kernel entry
+	 * point stored at *image_addr. Save those values in registers which
+	 * are callee preserved.
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
 	mov	x21, x0
 
 	/*
-	 * Flush dcache covering current runtime addresses
-	 * of kernel text/data. Then flush all of icache.
+	 * Calculate size of the kernel Image (same for original and copy).
 	 */
 	adrp	x1, _text
 	add	x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
 	add	x2, x2, #:lo12:_edata
 	sub	x1, x2, x1
 
+	/*
+	 * Flush the copied Image to the PoC, and ensure it is not shadowed by
+	 * stale icache entries from before relocation.
+	 */
 	bl	__flush_dcache_area
 	ic	ialluis
 
+	/*
+	 * Ensure that the rest of this function (in the original Image) is
+	 * visible when the caches are disabled. The I-cache can't have stale
+	 * entries for the VA range of the current image, so no maintenance is
+	 * necessary.
+	 */
+	adr	x0, efi_stub_entry
+	adr	x1, efi_stub_entry_end
+	sub	x1, x1, x0
+	bl	__flush_dcache_area
+
 	/* Turn off Dcache and MMU */
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
 	ldp	x29, x30, [sp], #32
 	ret
 
+efi_stub_entry_end:
 ENDPROC(efi_stub_entry)
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e007714ded04..8cd27fedc8b6 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 		 * which ends with "dsb; isb" pair guaranteeing global
 		 * visibility.
 		 */
-		atomic_set(&pp->cpu_count, -1);
+		/* Notify other processors with an additional increment. */
+		atomic_inc(&pp->cpu_count);
 	} else {
-		while (atomic_read(&pp->cpu_count) != -1)
+		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
 			cpu_relax();
 		isb();
 	}
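A note on the insn.c hunk above: under stop_machine() every online CPU runs
aarch64_insn_patch_text_cb(), and each increments cpu_count on entry; the first
to do so becomes the patching CPU. The extra increment added here can only push
the count past num_online_cpus() once all CPUs have entered and the patching is
done, which is what releases the spinning CPUs. Because the count is now
monotonic, a straggler's entry increment can no longer overwrite the completion
signal the way it could clobber the old -1 sentinel. A stand-alone sketch of
that rendezvous, using C11 atomics and pthreads in place of the kernel's
stop_machine() machinery (NCPUS, patch_text_cb and the other names here are
illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4				/* stands in for num_online_cpus() */

static atomic_int cpu_count;
static int patched_insn;		/* stands in for the patched text */

static void *patch_text_cb(void *arg)
{
	long cpu = (long)arg;

	/* Every CPU increments on entry; whoever saw 0 becomes master. */
	if (atomic_fetch_add(&cpu_count, 1) == 0) {
		patched_insn = 1;	/* the actual text-patching step */
		/*
		 * The additional increment: the count can now exceed NCPUS,
		 * but only after every CPU's entry increment has landed too.
		 */
		atomic_fetch_add(&cpu_count, 1);
	} else {
		/* Monotonic count, so this wait cannot miss the signal. */
		while (atomic_load(&cpu_count) <= NCPUS)
			;		/* cpu_relax() in the kernel */
	}
	printf("cpu %ld sees patched_insn=%d\n", cpu, patched_insn);
	return NULL;
}

int main(void)
{
	pthread_t threads[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&threads[i], NULL, patch_text_cb, (void *)i);
	for (long i = 0; i < NCPUS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

Built with cc -pthread, every thread reports patched_insn=1: no waiter can exit
its spin until the master's post-patch increment is visible.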
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 6e0ed93d51fe..c17967fdf5f6 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh	wzr, [x0], #2	)
 	sub	x1, x1, #2
4:	adds	x1, x1, #1
 	b.mi	5f
-	strb	wzr, [x0]
+USER(9f, strb	wzr, [x0]	)
5:	mov	x0, #0
 	ret
 ENDPROC(__clear_user)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0bf90d26e745..f4f8b500f74c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, unsigned long phys,
+				  unsigned long end, phys_addr_t phys,
 				  int map_io)
 {
 	pud_t *pud;
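And on the clear_user.S hunk: every instruction that touches user memory needs
a matching exception-table fixup, which is what the USER(9f, insn) annotation
emits. Before this change a fault on the trailing strb had no entry, so the
kernel would die instead of taking the fixup at label 9. A rough C model of
the contract __clear_user() implements, returning 0 on success or the number
of bytes that could not be zeroed; probe_byte_writable() is a hypothetical
stand-in for the MMU fault, not anything in the kernel:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Toy fault model: only the first 'writable_bytes' of the buffer are
 * "mapped". Stands in for the hardware check that raises a real fault.
 */
static char buf[16];
static size_t writable_bytes = 12;

static bool probe_byte_writable(char *addr)
{
	return (size_t)(addr - buf) < writable_bytes;
}

static size_t clear_user_model(char *to, size_t n)
{
	while (n) {
		if (!probe_byte_writable(to))
			return n;	/* the "label 9" fixup path */
		*to++ = 0;		/* each store needs the fixup above */
		n--;
	}
	return 0;
}

int main(void)
{
	/* Ask to clear 16 bytes when only 12 are writable: 4 remain. */
	printf("uncleared: %zu\n", clear_user_model(buf, sizeof(buf)));
	return 0;
}

In the model the fixup path is just an early return; the point of the patch is
that the final single-byte store gets that path at all, rather than being the
one user access in the function with no way to recover from a fault.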