[ARM] 5534/1: kmalloc must return a cache line aligned buffer
Define ARCH_KMALLOC_MINALIGN in asm/cache.h.

At the request of Russell, also move ARCH_SLAB_MINALIGN to this file.

Signed-off-by: Martin Fuzzey <mfuzzey@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit eb5f4ca953
parent d9244b5d2f
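As a rough sketch of the failure mode the commit message guards against, consider a driver receive path that uses a kmalloc() buffer for DMA. The device, function and buffer names below are hypothetical; only the slab and DMA-mapping calls are standard kernel APIs.

/* Hypothetical driver fragment: rx_buf is filled by the device via DMA. */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#define RX_BUF_SIZE	64

static int my_dev_start_rx(struct device *dev, void **buf, dma_addr_t *dma)
{
	/*
	 * With ARCH_KMALLOC_MINALIGN == L1_CACHE_BYTES this allocation
	 * starts on a cache-line boundary, so no unrelated object shares
	 * its cache lines.
	 */
	void *rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);

	if (!rx_buf)
		return -ENOMEM;

	/*
	 * Mapping for DMA_FROM_DEVICE invalidates the cache lines covering
	 * rx_buf.  If another object shared one of those lines, a CPU access
	 * to that object could pull the line back into the cache while the
	 * device is still writing, and the CPU would later read stale data
	 * from rx_buf.
	 */
	*dma = dma_map_single(dev, rx_buf, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(rx_buf);
		return -EIO;
	}

	*buf = rx_buf;
	return 0;
}

Without ARCH_KMALLOC_MINALIGN, small kmalloc() objects can be packed at sub-cache-line granularity, which is exactly the sharing the new comment in cache.h warns about.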
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -7,4 +7,20 @@
 #define L1_CACHE_SHIFT		5
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
 #endif
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -202,13 +202,6 @@ typedef struct page *pgtable_t;
 	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-/*
- * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
- */
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
 #include <asm-generic/page.h>
 
 #endif
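The ARCH_SLAB_MINALIGN definition that moves into cache.h exists because, with the ARM EABI, 64-bit types carry 8-byte alignment and the compiler is entitled to rely on it. A minimal sketch of a slab user that depends on this, assuming a made-up structure and cache name (struct sample and sample_cache are hypothetical):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct sample {
	u32 flags;
	u64 timestamp;		/* EABI gives this member 8-byte alignment */
};

static struct kmem_cache *sample_cache;

static int sample_cache_init(void)
{
	/*
	 * Even with an align argument of 0, the slab allocator never hands
	 * out objects aligned to less than ARCH_SLAB_MINALIGN, so with EABI
	 * on ARMv5+ every object (and hence 'timestamp') is 8-byte aligned.
	 */
	sample_cache = kmem_cache_create("sample", sizeof(struct sample),
					 0, 0, NULL);
	return sample_cache ? 0 : -ENOMEM;
}

static struct sample *sample_alloc(void)
{
	return kmem_cache_alloc(sample_cache, GFP_KERNEL);
}

Moving this definition out of asm/page.h and next to the cache geometry in asm/cache.h keeps both of ARM's minimum-alignment constraints for the allocators in one place.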