c605782b1c

This patch moves the definition of the PTE format for each MMU type to separate files instead of keeping them all in one file. This improves overall maintainability and will make it easier to add new types.

On 64-bit, I've additionally separated the headers describing the format of the page table tree (3 vs. 4 levels for 64K vs. 4K pages) from the headers specific to the PTE format for hash-based processors; this will make it easier to add support for Book3 "E" 64-bit implementations.

There are still some type-related #ifdefs in the generic headers; we might remove them in the long run. This patch shouldn't result in any code change, hopefully just definitions being moved around.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H
#define _ASM_POWERPC_PGTABLE_PPC64_4K_H

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9
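
/*
 * With 4K pages (PAGE_SHIFT == 12) this gives 512 PTEs per PTE page,
 * 128 entries at the PMD and PUD levels and 512 at the PGD level,
 * covering 12 + 9 + 7 + 7 + 9 = 44 bits, i.e. 2^44 bytes (16TB) of
 * virtual address space.
 */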

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE  (sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif  /* __ASSEMBLY__ */
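
/*
 * With 64b PTE records, PTE_TABLE_SIZE works out to 512 * 8 = 4096
 * bytes, i.e. exactly one 4K page, matching the page-alignment
 * assumption in the comment above.
 */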

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD    (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT       (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* With 4k base page size, hugepage PTEs go at the PMD level */
#define MIN_HUGEPTE_SHIFT       PMD_SHIFT

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT       (PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT     (PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
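
/*
 * Resulting mapping granularity per level: PMD_SHIFT = 21 (2MB per PMD
 * entry), PUD_SHIFT = 28 (256MB per PUD entry) and PGDIR_SHIFT = 35
 * (32GB per PGD entry).
 */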

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS         0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS         0
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS         0

/*
 * 4-level page tables related bits
 */

#define pgd_none(pgd)           (!pgd_val(pgd))
#define pgd_bad(pgd)            (pgd_val(pgd) == 0)
#define pgd_present(pgd)        (pgd_val(pgd) != 0)
#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0)
#define pgd_page_vaddr(pgd)     (pgd_val(pgd) & ~PGD_MASKED_BITS)
#define pgd_page(pgd)           virt_to_page(pgd_page_vaddr(pgd))

#define pud_offset(pgdp, addr)  \
        (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
         (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))

#define pud_ERROR(e) \
        printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

/*
 * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
 */
#define remap_4k_pfn(vma, addr, pfn, prot)      \
        remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))

#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */
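
For context, pud_offset() above is one step of the standard Linux page table walk. A minimal sketch of how these macros compose when resolving a kernel virtual address follows; it assumes the generic helpers pgd_offset_k(), pmd_offset() and pte_offset_kernel() from the surrounding powerpc/generic pgtable headers, which are not defined in this file.

/*
 * Sketch only: walk the kernel page tables for 'addr' and return the
 * PTE pointer, or NULL if any level is absent.  Relies on helpers from
 * <asm/pgtable.h>; pud_offset() is the macro defined in this header.
 */
static pte_t *walk_kernel_pte(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);   /* index into swapper_pg_dir */
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, addr);       /* (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1) */
        if (pud_none(*pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;

        return pte_offset_kernel(pmd, addr);
}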
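
remap_4k_pfn() lets a driver map a single 4K frame to userspace; on all 4K base-page setups it is, per the comment above, plain remap_pfn_range(). A hypothetical mmap handler using it might look as follows; foo_mmap and foo_dev_regs_phys are illustrative names, not part of this header, and a 4K base page size (PAGE_SIZE == 4096) is assumed.

/* Hypothetical example: expose one 4K MMIO page to userspace. */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* foo_dev_regs_phys: physical address of the device page (illustrative) */
        unsigned long pfn = foo_dev_regs_phys >> PAGE_SHIFT;

        /* Only a single page may be mapped through this interface. */
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
}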