f1b65f20fb
RISC-V systems perform TLB shootdowns via the SBI, which currently sends an IPI to each remote hart; each hart then performs a local TLB flush. This process is slow, but we can speed it up for some common cases by restricting the set of harts to shoot down to the harts currently participating in the given mm context, rather than the entire system. This should provide a measurable performance increase, though we have not measured it; regardless, it is clearly the right thing to do here.

Signed-off-by: Andrew Waterman <andrew@sifive.com>
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
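As a rough sketch of the idea (illustrative only, not code from this commit): the shootdown visits only the bits set in the mm's cpumask instead of every hart in the system. send_ipi_to_hart() below is a hypothetical stand-in; the real IPI delivery happens inside the SBI implementation, in machine mode.

#include <linux/bitops.h>
#include <linux/mm_types.h>
#include <linux/threads.h>

/* Hypothetical stand-in for the SBI's software-interrupt delivery. */
static void send_ipi_to_hart(unsigned long hart)
{
	/* In reality the SBI raises a software interrupt on 'hart';
	 * that hart's handler then executes a local sfence.vma. */
}

/* Walk only the harts recorded in the mm's cpumask. */
static void sketch_remote_shootdown(struct mm_struct *mm)
{
	unsigned long hart;

	for_each_set_bit(hart, mm_cpumask(mm)->bits, NR_CPUS)
		send_ipi_to_hart(hart);
}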
70 lines
1.9 KiB
C
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_TLBFLUSH_H
#define _ASM_RISCV_TLBFLUSH_H

#include <linux/mm_types.h>

/*
 * Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
 * cache as well, so a 'fence.i' is not necessary.
 */
static inline void local_flush_tlb_all(void)
{
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}

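/*
 * With !CONFIG_SMP there are no remote harts, so every flush below is
 * just a local sfence.vma.
 */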
#ifndef CONFIG_SMP

#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()

#else /* CONFIG_SMP */

#include <asm/sbi.h>

#define flush_tlb_all() sbi_remote_sfence_vma(0, 0, -1)
#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
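/*
 * The core of this change: pass the mm's cpumask as the SBI hart mask,
 * so only harts that have run this context receive the shootdown.
 */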
#define flush_tlb_range(vma, start, end) \
	sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \
		start, (end) - (start))
#define flush_tlb_mm(mm) \
	sbi_remote_sfence_vma(mm_cpumask(mm)->bits, 0, -1)

#endif /* CONFIG_SMP */

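/*
 * Kernel mappings have no mm (and thus no cpumask) to narrow by, so
 * the kernel-range helper below falls back to a global flush.
 */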
/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ASM_RISCV_TLBFLUSH_H */