csky: Add flush_icache_mm to defer flush icache all
Some CPUs don't support the icache.va instruction for maintaining the icache of all SMP cores. Using icache.all plus an IPI costs a lot of performance, and a deferred-flush mechanism reduces the number of icache-flush-all operations.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
commit 997153b9a7 (parent cc1f6563a9)
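Before diving into the hunks, it may help to see the mechanism in miniature. The sketch below is a hypothetical user-space model (invented names; an atomic word stands in for mm->context.icache_stale_mask, and plain function calls stand in for harts): the writer marks every CPU's icache stale and flushes only its own, and each CPU invalidates lazily the next time it switches into the mm.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

/* Bit n set == CPU n still has a stale icache for this mm. */
static atomic_uint icache_stale_mask;

/* Writer side: mark everyone stale, then "flush" only the local CPU. */
static void model_flush_icache_mm_range(unsigned int cpu)
{
	atomic_store(&icache_stale_mask, (1u << NR_CPUS) - 1);
	atomic_fetch_and(&icache_stale_mask, ~(1u << cpu));
	printf("cpu%u: invalidated own icache immediately\n", cpu);
}

/* Switch-in side: invalidate only if this CPU is still marked stale. */
static void model_flush_icache_deferred(unsigned int cpu)
{
	if (atomic_load(&icache_stale_mask) & (1u << cpu)) {
		atomic_fetch_and(&icache_stale_mask, ~(1u << cpu));
		printf("cpu%u: deferred icache invalidate on switch-in\n", cpu);
	}
}

int main(void)
{
	model_flush_icache_mm_range(0);	/* CPU0 patches code in the mm */
	model_flush_icache_deferred(1);	/* CPU1 switches in: flushes once */
	model_flush_icache_deferred(1);	/* CPU1 again: nothing left to do */
	return 0;
}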
@@ -48,6 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
 #define flush_icache_page(vma, page) do {} while (0);
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
+#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
+#define flush_icache_deferred(mm) do {} while (0);
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
@@ -28,3 +28,58 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	kunmap_atomic((void *) addr);
 }
 
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_icache_inv_all(NULL);
+	}
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+			   unsigned long start, unsigned long end)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+	if (mm == current->mm) {
+		icache_inv_range(start, end);
+		preempt_enable();
+		return;
+	}
+#endif
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_icache_inv_all(NULL);
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+	if (mm != current->active_mm || !cpumask_empty(&others)) {
+		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+		cpumask_clear(mask);
+	}
+
+	preempt_enable();
+}
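A subtlety in the tail of flush_icache_mm_range() above: the immediate IPI broadcast (and the full clear of the stale mask) is skipped only when the mm is active on no hart other than the current one; every other case flushes the running harts now and leaves sleeping ones to the deferred path. A throwaway enumeration (hypothetical user-space code, not part of the patch) spells out the decision table:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int active_here = 0; active_here <= 1; active_here++)
		for (int others = 0; others <= 1; others++) {
			/* Mirrors: mm != current->active_mm || !cpumask_empty(&others) */
			bool ipi_now = !active_here || others;

			printf("active on this hart=%d, others running=%d -> %s\n",
			       active_here, others,
			       ipi_now ? "IPI now + clear stale mask"
				       : "defer to switch_mm()");
		}
	return 0;
}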
@@ -31,15 +31,23 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
 
+void flush_icache_mm_range(struct mm_struct *mm,
+			   unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
+
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
 	memcpy(dst, src, len); \
-	if (vma->vm_flags & VM_EXEC) \
-		cache_wbinv_range((unsigned long)dst, \
-				  (unsigned long)dst + len); \
+	if (vma->vm_flags & VM_EXEC) { \
+		dcache_wb_range((unsigned long)dst, \
+				(unsigned long)dst + len); \
+		flush_icache_mm_range(current->mm, \
+				(unsigned long)dst, \
+				(unsigned long)dst + len); \
+	} \
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
@@ -4,6 +4,7 @@
 #ifndef __ASM_CSKY_CACHEFLUSH_H
 #define __ASM_CSKY_CACHEFLUSH_H
 
+#include <linux/mm.h>
 #include <abi/cacheflush.h>
 
 #endif /* __ASM_CSKY_CACHEFLUSH_H */
@@ -7,6 +7,7 @@
 typedef struct {
 	atomic64_t asid;
 	void *vdso;
+	cpumask_t icache_stale_mask;
 } mm_context_t;
 
 #endif /* __ASM_CSKY_MMU_H */
@@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 	write_mmu_entryhi(next->context.asid.counter);
+
+	flush_icache_deferred(next);
 }
 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
@@ -3,7 +3,7 @@
 
 #include <linux/syscalls.h>
 #include <asm/page.h>
-#include <asm/cache.h>
+#include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
 SYSCALL_DEFINE3(cacheflush,
@@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush,
 {
 	switch (cache) {
 	case ICACHE:
-		icache_inv_range((unsigned long)addr,
-				 (unsigned long)addr + bytes);
-		break;
+	case BCACHE:
+		flush_icache_mm_range(current->mm,
+				(unsigned long)addr,
+				(unsigned long)addr + bytes);
 	case DCACHE:
 		dcache_wb_range((unsigned long)addr,
 				(unsigned long)addr + bytes);
 		break;
-	case BCACHE:
-		cache_wbinv_range((unsigned long)addr,
-				  (unsigned long)addr + bytes);
-		break;
 	default:
 		return -EINVAL;
 	}
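With the syscall rerouted through flush_icache_mm_range(), a user-space JIT gets the deferred behavior for free. A minimal sketch of a caller (hypothetical; assumes a csky toolchain where __NR_cacheflush and the ICACHE/DCACHE/BCACHE constants from <asm/cachectl.h> are available):

#include <sys/syscall.h>
#include <unistd.h>
#include <asm/cachectl.h>	/* ICACHE, DCACHE, BCACHE */

/* Ask the kernel to synchronize caches over a freshly patched range. */
static int sync_caches(void *addr, unsigned long bytes, int cache)
{
	return syscall(__NR_cacheflush, addr, bytes, cache);
}

int main(void)
{
	static unsigned int code[16];	/* stands in for a JIT code buffer */

	/* ... write new instructions into code[] here ... */

	return sync_caches(code, sizeof(code), BCACHE) ? 1 : 0;
}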