sh: Make cache flushers SMP-aware.
This does a bit of rework for making the cache flushers SMP-aware. The function pointer-based flushers are renamed to local variants with the exported interface being commonly implemented and wrapping as necessary.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit f26b2a562b
parent f9bd71f255
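In short, the per-CPU-family function pointers that used to be the exported cache-flush interface become local_* pointers taking a single void * argument (so they can run as on_each_cpu() callbacks), and the exported entry points become ordinary functions that pack their arguments into a struct flusher_data and broadcast the local flusher to every CPU. The following is a minimal sketch of that pattern, condensed from the flush_cache_range() path added to arch/sh/mm/cache.c in the diff below; names are taken from the diff, bodies are abridged, and this is not the complete patch:

/* Sketch only: condensed from the new arch/sh/mm/cache.c code below. */
#include <linux/mm.h>
#include <linux/smp.h>

struct flusher_data {
        struct vm_area_struct *vma;
        unsigned long addr1, addr2;
};

/* Default local flusher: does nothing until a CPU family's cache_init()
 * (sh2a/sh4/sh5/sh7705 in the diff) installs its implementation. */
static inline void cache_noop(void *args) { }

/* Per-CPU flusher: takes a single void * so it can run as an IPI callback. */
void (*local_flush_cache_range)(void *args) = cache_noop;

/* Exported interface keeps its old prototype; it packs the arguments and
 * runs the local flusher on every CPU (a single local call on UP). */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}

Each CPU family then assigns its implementation to the local_* pointer in its cache_init() routine instead of to the exported name, as the sh4_cache_init() and sh5_cache_init() hunks below show.
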
@@ -19,23 +19,40 @@
  * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
  * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  */
-extern void (*flush_cache_all)(void);
-extern void (*flush_cache_mm)(struct mm_struct *mm);
-extern void (*flush_cache_dup_mm)(struct mm_struct *mm);
-extern void (*flush_cache_page)(struct vm_area_struct *vma,
-                                unsigned long addr, unsigned long pfn);
-extern void (*flush_cache_range)(struct vm_area_struct *vma,
-                                 unsigned long start, unsigned long end);
-extern void (*flush_dcache_page)(struct page *page);
-extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-extern void (*flush_icache_page)(struct vm_area_struct *vma,
-                                 struct page *page);
-extern void (*flush_cache_sigtramp)(unsigned long address);
+extern void (*local_flush_cache_all)(void *args);
+extern void (*local_flush_cache_mm)(void *args);
+extern void (*local_flush_cache_dup_mm)(void *args);
+extern void (*local_flush_cache_page)(void *args);
+extern void (*local_flush_cache_range)(void *args);
+extern void (*local_flush_dcache_page)(void *args);
+extern void (*local_flush_icache_range)(void *args);
+extern void (*local_flush_icache_page)(void *args);
+extern void (*local_flush_cache_sigtramp)(void *args);
+
+static inline void cache_noop(void *args) { }
 
 extern void (*__flush_wback_region)(void *start, int size);
 extern void (*__flush_purge_region)(void *start, int size);
 extern void (*__flush_invalidate_region)(void *start, int size);
 
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_dup_mm(struct mm_struct *mm);
+extern void flush_cache_page(struct vm_area_struct *vma,
+                             unsigned long addr, unsigned long pfn);
+extern void flush_cache_range(struct vm_area_struct *vma,
+                              unsigned long start, unsigned long end);
+extern void flush_dcache_page(struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma,
+                              struct page *page);
+extern void flush_cache_sigtramp(unsigned long address);
+
+struct flusher_data {
+        struct vm_area_struct *vma;
+        unsigned long addr1, addr2;
+};
+
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);

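One point worth noting about the hunk above: callers of the exported interface are untouched by this change, since the prototypes keep their old signatures; only the implementation behind them becomes an on_each_cpu() broadcast to the local_* flushers (see the arch/sh/mm/cache.c hunks at the end of the diff).
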
@@ -97,13 +97,15 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 }
 
 /* WBack O-Cache and flush I-Cache */
-static void sh2a_flush_icache_range(unsigned long start, unsigned long end)
+static void sh2a_flush_icache_range(void *args)
 {
+        struct flusher_data *data = args;
+        unsigned long start, end;
         unsigned long v;
         unsigned long flags;
 
-        start = start & ~(L1_CACHE_BYTES-1);
-        end = (end + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
+        start = data->addr1 & ~(L1_CACHE_BYTES-1);
+        end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
 
         local_irq_save(flags);
         jump_to_uncached();
@@ -130,7 +132,7 @@ static void sh2a_flush_icache_range(unsigned long start, unsigned long end)
 
 void __init sh2a_cache_init(void)
 {
-        flush_icache_range = sh2a_flush_icache_range;
+        local_flush_icache_range = sh2a_flush_icache_range;
 
         __flush_wback_region = sh2a__flush_wback_region;
         __flush_purge_region = sh2a__flush_purge_region;

@@ -43,15 +43,20 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void sh4_flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(void *args)
 {
+        struct flusher_data *data = args;
         int icacheaddr;
+        unsigned long start, end;
         unsigned long flags, v;
         int i;
 
+        start = data->addr1;
+        end = data->addr2;
+
         /* If there are too many pages then just blow the caches */
         if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-                flush_cache_all();
+                local_flush_cache_all(args);
         } else {
                 /* selectively flush d-cache then invalidate the i-cache */
                 /* this is inefficient, so only use for small ranges */
@@ -104,7 +109,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh4_flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(void *page)
 {
 #ifndef CONFIG_SMP
         struct address_space *mapping = page_mapping(page);
@@ -155,7 +160,7 @@ static inline void flush_dcache_all(void)
         wmb();
 }
 
-static void sh4_flush_cache_all(void)
+static void sh4_flush_cache_all(void *unused)
 {
         flush_dcache_all();
         flush_icache_all();
@@ -247,8 +252,10 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
  *
  * Caller takes mm->mmap_sem.
  */
-static void sh4_flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(void *arg)
 {
+        struct mm_struct *mm = arg;
+
         if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                 return;
 
@@ -287,12 +294,18 @@ static void sh4_flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-static void sh4_flush_cache_page(struct vm_area_struct *vma,
-                                 unsigned long address, unsigned long pfn)
+static void sh4_flush_cache_page(void *args)
 {
-        unsigned long phys = pfn << PAGE_SHIFT;
+        struct flusher_data *data = args;
+        struct vm_area_struct *vma;
+        unsigned long address, pfn, phys;
         unsigned int alias_mask;
 
+        vma = data->vma;
+        address = data->addr1;
+        pfn = data->addr2;
+        phys = pfn << PAGE_SHIFT;
+
         if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                 return;
 
@@ -335,9 +348,16 @@ static void sh4_flush_cache_page(struct vm_area_struct *vma,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-static void sh4_flush_cache_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end)
+static void sh4_flush_cache_range(void *args)
 {
+        struct flusher_data *data = args;
+        struct vm_area_struct *vma;
+        unsigned long start, end;
+
+        vma = data->vma;
+        start = data->addr1;
+        end = data->addr2;
+
         if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                 return;
 
@@ -663,13 +683,13 @@ void __init sh4_cache_init(void)
                 break;
         }
 
-        flush_icache_range = sh4_flush_icache_range;
-        flush_dcache_page = sh4_flush_dcache_page;
-        flush_cache_all = sh4_flush_cache_all;
-        flush_cache_mm = sh4_flush_cache_mm;
-        flush_cache_dup_mm = sh4_flush_cache_mm;
-        flush_cache_page = sh4_flush_cache_page;
-        flush_cache_range = sh4_flush_cache_range;
+        local_flush_icache_range = sh4_flush_icache_range;
+        local_flush_dcache_page = sh4_flush_dcache_page;
+        local_flush_cache_all = sh4_flush_cache_all;
+        local_flush_cache_mm = sh4_flush_cache_mm;
+        local_flush_cache_dup_mm = sh4_flush_cache_mm;
+        local_flush_cache_page = sh4_flush_cache_page;
+        local_flush_cache_range = sh4_flush_cache_range;
 
         sh4__flush_region_init();
 }

@@ -483,7 +483,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-static void sh5_flush_cache_all(void)
+static void sh5_flush_cache_all(void *unused)
 {
         sh64_dcache_purge_all();
         sh64_icache_inv_all();
@@ -510,7 +510,7 @@ static void sh5_flush_cache_all(void)
  * I-cache. This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-static void sh5_flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(void *unused)
 {
         sh64_dcache_purge_all();
 }
@@ -522,13 +522,18 @@ static void sh5_flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-static void sh5_flush_cache_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end)
+static void sh5_flush_cache_range(void *args)
 {
-        struct mm_struct *mm = vma->vm_mm;
+        struct flusher_data *data = args;
+        struct vm_area_struct *vma;
+        unsigned long start, end;
 
-        sh64_dcache_purge_user_range(mm, start, end);
-        sh64_icache_inv_user_page_range(mm, start, end);
+        vma = data->vma;
+        start = data->addr1;
+        end = data->addr2;
+
+        sh64_dcache_purge_user_range(vma->vm_mm, start, end);
+        sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
 }
 
 /*
@@ -540,16 +545,23 @@ static void sh5_flush_cache_range(struct vm_area_struct *vma,
  *
  * Note, this is called with pte lock held.
  */
-static void sh5_flush_cache_page(struct vm_area_struct *vma,
-                                 unsigned long eaddr, unsigned long pfn)
+static void sh5_flush_cache_page(void *args)
 {
+        struct flusher_data *data = args;
+        struct vm_area_struct *vma;
+        unsigned long eaddr, pfn;
+
+        vma = data->vma;
+        eaddr = data->addr1;
+        pfn = data->addr2;
+
         sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 
         if (vma->vm_flags & VM_EXEC)
                 sh64_icache_inv_user_page(vma, eaddr);
 }
 
-static void sh5_flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(void *page)
 {
         sh64_dcache_purge_phy_page(page_to_phys(page));
         wmb();
@@ -563,8 +575,14 @@ static void sh5_flush_dcache_page(struct page *page)
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-static void sh5_flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(void *args)
 {
+        struct flusher_data *data = args;
+        unsigned long start, end;
+
+        start = data->addr1;
+        end = data->addr2;
+
         __flush_purge_region((void *)start, end);
         wmb();
         sh64_icache_inv_kernel_range(start, end);
@@ -576,25 +594,25 @@ static void sh5_flush_icache_range(unsigned long start, unsigned long end)
  * current process. Used to flush signal trampolines on the stack to
  * make them executable.
  */
-static void sh5_flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(void *vaddr)
 {
-        unsigned long end = vaddr + L1_CACHE_BYTES;
+        unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
 
-        __flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
+        __flush_wback_region(vaddr, L1_CACHE_BYTES);
         wmb();
-        sh64_icache_inv_current_user_range(vaddr, end);
+        sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
 }
 
 void __init sh5_cache_init(void)
 {
-        flush_cache_all = sh5_flush_cache_all;
-        flush_cache_mm = sh5_flush_cache_mm;
-        flush_cache_dup_mm = sh5_flush_cache_mm;
-        flush_cache_page = sh5_flush_cache_page;
-        flush_cache_range = sh5_flush_cache_range;
-        flush_dcache_page = sh5_flush_dcache_page;
-        flush_icache_range = sh5_flush_icache_range;
-        flush_cache_sigtramp = sh5_flush_cache_sigtramp;
+        local_flush_cache_all = sh5_flush_cache_all;
+        local_flush_cache_mm = sh5_flush_cache_mm;
+        local_flush_cache_dup_mm = sh5_flush_cache_mm;
+        local_flush_cache_page = sh5_flush_cache_page;
+        local_flush_cache_range = sh5_flush_cache_range;
+        local_flush_dcache_page = sh5_flush_dcache_page;
+        local_flush_icache_range = sh5_flush_icache_range;
+        local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
 
         /* Reserve a slot for dcache colouring in the DTLB */
         dtlb_cache_slot = sh64_get_wired_dtlb_entry();

@@ -64,8 +64,14 @@ static inline void cache_wback_all(void)
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format.
  */
-static void sh7705_flush_icache_range(unsigned long start, unsigned long end)
+static void sh7705_flush_icache_range(void *args)
 {
+        struct flusher_data *data = args;
+        unsigned long start, end;
+
+        start = data->addr1;
+        end = data->addr2;
+
         __flush_wback_region((void *)start, end - start);
 }
 
@@ -127,7 +133,7 @@ static void __flush_dcache_page(unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh7705_flush_dcache_page(struct page *page)
+static void sh7705_flush_dcache_page(void *page)
 {
         struct address_space *mapping = page_mapping(page);
 
@@ -137,7 +143,7 @@ static void sh7705_flush_dcache_page(struct page *page)
                 __flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
-static void sh7705_flush_cache_all(void)
+static void sh7705_flush_cache_all(void *args)
 {
         unsigned long flags;
 
@@ -149,44 +155,16 @@ static void sh7705_flush_cache_all(void)
         local_irq_restore(flags);
 }
 
-static void sh7705_flush_cache_mm(struct mm_struct *mm)
-{
-        /* Is there any good way? */
-        /* XXX: possibly call flush_cache_range for each vm area */
-        flush_cache_all();
-}
-
-/*
- * Write back and invalidate D-caches.
- *
- * START, END: Virtual Address (U0 address)
- *
- * NOTE: We need to flush the _physical_ page entry.
- * Flushing the cache lines for U0 only isn't enough.
- * We need to flush for P1 too, which may contain aliases.
- */
-static void sh7705_flush_cache_range(struct vm_area_struct *vma,
-                                     unsigned long start, unsigned long end)
-{
-
-        /*
-         * We could call flush_cache_page for the pages of these range,
-         * but it's not efficient (scan the caches all the time...).
-         *
-         * We can't use A-bit magic, as there's the case we don't have
-         * valid entry on TLB.
-         */
-        flush_cache_all();
-}
-
 /*
  * Write back and invalidate I/D-caches for the page.
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-static void sh7705_flush_cache_page(struct vm_area_struct *vma,
-                                    unsigned long address, unsigned long pfn)
+static void sh7705_flush_cache_page(void *args)
 {
+        struct flusher_data *data = args;
+        unsigned long pfn = data->addr2;
+
         __flush_dcache_page(pfn << PAGE_SHIFT);
 }
 
@@ -198,20 +176,19 @@ static void sh7705_flush_cache_page(struct vm_area_struct *vma,
  * Not entirely sure why this is necessary on SH3 with 32K cache but
  * without it we get occasional "Memory fault" when loading a program.
  */
-static void sh7705_flush_icache_page(struct vm_area_struct *vma,
-                                     struct page *page)
+static void sh7705_flush_icache_page(void *page)
 {
         __flush_purge_region(page_address(page), PAGE_SIZE);
 }
 
 void __init sh7705_cache_init(void)
 {
-        flush_icache_range = sh7705_flush_icache_range;
-        flush_dcache_page = sh7705_flush_dcache_page;
-        flush_cache_all = sh7705_flush_cache_all;
-        flush_cache_mm = sh7705_flush_cache_mm;
-        flush_cache_dup_mm = sh7705_flush_cache_mm;
-        flush_cache_range = sh7705_flush_cache_range;
-        flush_cache_page = sh7705_flush_cache_page;
-        flush_icache_page = sh7705_flush_icache_page;
+        local_flush_icache_range = sh7705_flush_icache_range;
+        local_flush_dcache_page = sh7705_flush_dcache_page;
+        local_flush_cache_all = sh7705_flush_cache_all;
+        local_flush_cache_mm = sh7705_flush_cache_all;
+        local_flush_cache_dup_mm = sh7705_flush_cache_all;
+        local_flush_cache_range = sh7705_flush_cache_all;
+        local_flush_cache_page = sh7705_flush_cache_page;
+        local_flush_icache_page = sh7705_flush_icache_page;
 }

@@ -1,5 +1,5 @@
 /*
- * arch/sh/mm/pg-mmu.c
+ * arch/sh/mm/cache.c
  *
  * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
  * Copyright (C) 2002 - 2009 Paul Mundt
@@ -10,63 +10,26 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/smp.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-void (*flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_dup_mm)(struct mm_struct *mm);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-                         unsigned long addr, unsigned long pfn);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-                          unsigned long start, unsigned long end);
-void (*flush_dcache_page)(struct page *page);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*flush_icache_page)(struct vm_area_struct *vma,
-                          struct page *page);
-void (*flush_cache_sigtramp)(unsigned long address);
+void (*local_flush_cache_all)(void *args) = cache_noop;
+void (*local_flush_cache_mm)(void *args) = cache_noop;
+void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
+void (*local_flush_cache_page)(void *args) = cache_noop;
+void (*local_flush_cache_range)(void *args) = cache_noop;
+void (*local_flush_dcache_page)(void *args) = cache_noop;
+void (*local_flush_icache_range)(void *args) = cache_noop;
+void (*local_flush_icache_page)(void *args) = cache_noop;
+void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
 
 void (*__flush_wback_region)(void *start, int size);
 void (*__flush_purge_region)(void *start, int size);
 void (*__flush_invalidate_region)(void *start, int size);
 
-static inline void noop_flush_cache_all(void)
-{
-}
-
-static inline void noop_flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void noop_flush_cache_page(struct vm_area_struct *vma,
-                                         unsigned long addr, unsigned long pfn)
-{
-}
-
-static inline void noop_flush_cache_range(struct vm_area_struct *vma,
-                                          unsigned long start, unsigned long end)
-{
-}
-
-static inline void noop_flush_dcache_page(struct page *page)
-{
-}
-
-static inline void noop_flush_icache_range(unsigned long start,
-                                           unsigned long end)
-{
-}
-
-static inline void noop_flush_icache_page(struct vm_area_struct *vma,
-                                          struct page *page)
-{
-}
-
-static inline void noop_flush_cache_sigtramp(unsigned long address)
-{
-}
-
 static inline void noop__flush_region(void *start, int size)
 {
 }
 
@@ -184,6 +147,72 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
         }
 }
 
+void flush_cache_all(void)
+{
+        on_each_cpu(local_flush_cache_all, NULL, 1);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+        on_each_cpu(local_flush_cache_mm, mm, 1);
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+        on_each_cpu(local_flush_cache_dup_mm, mm, 1);
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+                      unsigned long pfn)
+{
+        struct flusher_data data;
+
+        data.vma = vma;
+        data.addr1 = addr;
+        data.addr2 = pfn;
+
+        on_each_cpu(local_flush_cache_page, (void *)&data, 1);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end)
+{
+        struct flusher_data data;
+
+        data.vma = vma;
+        data.addr1 = start;
+        data.addr2 = end;
+
+        on_each_cpu(local_flush_cache_range, (void *)&data, 1);
+}
+
+void flush_dcache_page(struct page *page)
+{
+        on_each_cpu(local_flush_dcache_page, page, 1);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+        struct flusher_data data;
+
+        data.vma = NULL;
+        data.addr1 = start;
+        data.addr2 = end;
+
+        on_each_cpu(local_flush_icache_range, (void *)&data, 1);
+}
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+        /* Nothing uses the VMA, so just pass the struct page along */
+        on_each_cpu(local_flush_icache_page, page, 1);
+}
+
+void flush_cache_sigtramp(unsigned long address)
+{
+        on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
+}
+
 static void compute_alias(struct cache_info *c)
 {
         c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
@@ -230,16 +259,6 @@ void __init cpu_cache_init(void)
         compute_alias(&boot_cpu_data.dcache);
         compute_alias(&boot_cpu_data.scache);
 
-        flush_cache_all = noop_flush_cache_all;
-        flush_cache_mm = noop_flush_cache_mm;
-        flush_cache_dup_mm = noop_flush_cache_mm;
-        flush_cache_page = noop_flush_cache_page;
-        flush_cache_range = noop_flush_cache_range;
-        flush_dcache_page = noop_flush_dcache_page;
-        flush_icache_range = noop_flush_icache_range;
-        flush_icache_page = noop_flush_icache_page;
-        flush_cache_sigtramp = noop_flush_cache_sigtramp;
-
         __flush_wback_region = noop__flush_region;
         __flush_purge_region = noop__flush_region;
         __flush_invalidate_region = noop__flush_region;