5f0fbf9eca
This is the minimum fixmap interface expected to be implemented by architectures supporting highmem.

We have a second-level page table already allocated and covering 0xfff00000-0xffffffff, because the exception vector page is located at 0xffff0000 and various cache tricks already use some entries above 0xffff0000. Therefore the PTEs covering 0xfff00000-0xfffeffff are free to be used. However, the XScale cache flushing code already uses virtual addresses between 0xfffe0000 and 0xfffeffff, so this reserves the 0xfff00000-0xfffdffff range for fixmap stuff.

The Documentation/arm/memory.txt information is updated accordingly, including the information about the actual top of the DMA memory mapping region, which didn't match the code.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
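The fixmap header introduced by this change is not reproduced on this page. As a rough illustration of the reserved window described above, a minimal sketch of the constants and index conversions it implies could look like the following; the FIXADDR_*/FIX_KMAP_* names follow the generic kernel fixmap convention and are an assumption here, not a quote of the commit:

/*
 * Illustrative sketch only -- constants implied by the range reserved
 * above (0xfff00000-0xfffdffff), using conventional fixmap naming.
 */
#define FIXADDR_START   0xfff00000UL
#define FIXADDR_TOP     0xfffe0000UL    /* XScale cache flush area begins here */
#define FIXADDR_SIZE    (FIXADDR_TOP - FIXADDR_START)

#define FIX_KMAP_BEGIN  0
#define FIX_KMAP_END    (FIXADDR_SIZE >> PAGE_SHIFT)

/* convert between a fixmap slot index and its fixed virtual address */
#define __fix_to_virt(x)        (FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)        (((x) - FIXADDR_START) >> PAGE_SHIFT)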
#ifdef CONFIG_MMU

/* the upper-most page table pointer */
extern pmd_t *top_pmd;

/* pte entry for a virtual address in the region covered by top_pmd */
#define TOP_PTE(x)      pte_offset_kernel(top_pmd, x)

/* pmd entry covering 'virt' within the given page directory */
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
        return pmd_offset(pgd, virt);
}

/* pmd entry covering 'virt' in the kernel page tables */
static inline pmd_t *pmd_off_k(unsigned long virt)
{
        return pmd_off(pgd_offset_k(virt), virt);
}

/* protection/domain attributes used when building kernel mappings */
struct mem_type {
        unsigned int prot_pte;          /* second-level (PTE) protection bits */
        unsigned int prot_l1;           /* first-level table descriptor bits */
        unsigned int prot_sect;         /* first-level section mapping bits */
        unsigned int domain;            /* ARM access domain */
};

const struct mem_type *get_mem_type(unsigned int type);

#endif

struct map_desc;
struct meminfo;
struct pglist_data;

void __init create_mapping(struct map_desc *md);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
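For context, here is a hedged sketch of how the TOP_PTE() helper above is typically used to install a page at one of the fixed virtual addresses in this window. set_pte_ext(), mk_pte() and local_flush_tlb_kernel_page() are standard ARM kernel helpers of this era; the wrapper function itself is illustrative and not part of this file.

/*
 * Illustrative only: map 'page' at the fixed virtual address for slot
 * 'idx', using the top_pmd-based TOP_PTE() helper declared above.
 */
static void example_set_fixmap(unsigned int idx, struct page *page, pgprot_t prot)
{
        unsigned long vaddr = __fix_to_virt(idx);       /* see the fixmap sketch above */

        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, prot), 0);
        local_flush_tlb_kernel_page(vaddr);
}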
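Similarly, a hedged sketch of how boot-time code might use the create_mapping() declaration above to establish a static kernel mapping. The map_desc field names and MT_DEVICE come from <asm/mach/map.h> of this kernel generation; the wrapper function is illustrative only.

/*
 * Illustrative only: build a map_desc and hand it to create_mapping()
 * to establish a static kernel mapping at boot time.
 */
static void __init example_map_device(unsigned long phys, unsigned long virt,
                                      unsigned long size)
{
        struct map_desc md;

        md.pfn = __phys_to_pfn(phys);   /* physical page frame number */
        md.virtual = virt;              /* kernel virtual address */
        md.length = size;
        md.type = MT_DEVICE;            /* device memory attributes */

        create_mapping(&md);
}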