kernel_optimize_test/arch/arm/mm/copypage-xscale.c
Mike Rapoport e31cf2f4ca mm: don't include asm/pgtable.h if linux/mm.h is already included
Patch series "mm: consolidate definitions of page table accessors", v2.

The low level page table accessors (pXY_index(), pXY_offset()) are
duplicated across all architectures, and sometimes more than once per
architecture.  For instance, we have 31 definitions of pgd_offset() for
25 supported architectures.

Most of these definitions are actually identical and typically boil
down to, e.g.

static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

These definitions can be shared among 90% of the arches, provided that
XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined.

For architectures that really need a custom version, there is always the
possibility to override the generic version with the usual ifdef magic
(see the sketch below).

These patches introduce include/linux/pgtable.h, which replaces
include/asm-generic/pgtable.h, and add the definitions of the page table
accessors to the new header.
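
As an illustration, a minimal sketch of how the shared definition and the
per-arch override hook could fit together.  The "#ifndef pmd_offset" guard
spelling follows a common kernel idiom but is an assumption here, and
xyz_hw_walk_pmd() is a hypothetical arch helper, not a quote from the
series:

/* include/linux/pgtable.h (sketch): generic, overridable version */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

/* arch/xyz/include/asm/pgtable.h (hypothetical arch): custom version */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return xyz_hw_walk_pmd(pud, address);   /* hypothetical helper */
}
#define pmd_offset pmd_offset   /* tells the generic header to back off */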

This patch (of 12):

The linux/mm.h header includes <asm/pgtable.h> to allow inlining of
functions that involve page table manipulations, e.g.  pte_alloc() and
pmd_alloc().  So there is no point in explicitly including <asm/pgtable.h>
in files that already include <linux/mm.h>.

The include statements in such cases are removed with a simple loop:

	for f in $(git grep -l "include <linux/mm.h>") ; do
		sed -i -e '/include <asm\/pgtable.h>/ d' $f
	done
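
Applied to this file, the loop simply drops the <asm/pgtable.h> line.  A
reconstructed hunk, shown for illustration only (the exact position of the
removed include in the original file is an assumption), would look like:

	--- a/arch/arm/mm/copypage-xscale.c
	+++ b/arch/arm/mm/copypage-xscale.c
	@@
	 #include <linux/init.h>
	 #include <linux/mm.h>
	 #include <linux/highmem.h>
	-#include <asm/pgtable.h>
	 #include <asm/tlbflush.h>
	 #include <asm/cacheflush.h>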

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org
Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-09 09:39:13 -07:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 *  This handles the mini data cache, as found on SA11x0 and XScale
 *  processors.  When we copy a user page, we map it in such a way
 *  that accesses to this page will not touch the main data cache, but
 *  will be cached in the mini data cache.  This prevents us thrashing
 *  the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile ("\
	.arch xscale					\n\
	pld	[%0, #0]				\n\
	pld	[%0, #32]				\n\
	pld	[%1, #0]				\n\
	pld	[%1, #32]				\n\
1:	pld	[%0, #64]				\n\
	pld	[%0, #96]				\n\
	pld	[%1, #64]				\n\
	pld	[%1, #96]				\n\
2:	ldrd	r2, r3, [%0], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1			@ clean D line\n\
	ldrd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1			@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8			\n\
	mov	ip, %1					\n\
	strd	r2, r3, [%1], #8			\n\
	ldrd	r2, r3, [%0], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	ldrd	r4, r5, [%0], #8			\n\
	strd	r2, r3, [%1], #8			\n\
	strd	r4, r5, [%1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1			@ clean D line\n\
	subs	%2, %2, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1			@ invalidate D line\n\
	bgt	1b					\n\
	beq	2b				"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	/*
	 * COPYPAGE_MINICACHE is a single shared mapping, so serialize
	 * its users.
	 */
	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}
/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);

	asm volatile("\
	.arch xscale					\n\
	mov	r1, %2					\n\
	mov	r2, #0					\n\
	mov	r3, #0					\n\
1:	mov	ip, %0					\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	strd	r2, r3, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1			@ clean D line\n\
	subs	r1, r1, #1				\n\
	mcr	p15, 0, ip, c7, c6, 1			@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	 = xscale_mc_copy_user_highpage,
};