forked from luck/tmp_suning_uos_patched
52142e756e
On platforms doing non-coherent DMA (4xx, 8xx, ...), it's important that the kmalloc minimum alignment is set to the cache line size, to avoid sharing cache lines between different objects, so that DMA to one of the objects doesn't corrupt the other. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
37 lines · 924 B · C
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H
#ifdef __KERNEL__

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32

#define PPC_MEMSTART	0

/*
 * On platforms doing non-coherent DMA (4xx, 8xx, ...) the kmalloc
 * minimum alignment must be the cache line size, so distinct objects
 * never share a cache line and DMA to one cannot corrupt another.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
#endif

#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.  For now this just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#endif

struct page;
/* Clear 2^order contiguous pages starting at @page (implemented in asm). */
extern void clear_pages(void *page, int order);
/* Clear a single page: order-0 case of clear_pages(). */
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);

#include <asm-generic/page.h>

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PAGE_32_H */