powerpc: Remove some old bootmem related comments

Now that bootmem is gone from powerpc, we can remove comments mentioning it.

Signed-off-by: Anton Blanchard <anton@samba.org>
Tested-by: Emil Medve <Emilian.Medve@Freescale.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Anton Blanchard 2014-09-17 22:15:34 +10:00 committed by Michael Ellerman
parent 10239733ee
commit 14ed740957
5 changed files with 6 additions and 13 deletions


@@ -696,10 +696,7 @@ void __init early_init_devtree(void *params)
 	reserve_crashkernel();
 	early_reserve_mem();
 
-	/*
-	 * Ensure that total memory size is page-aligned, because otherwise
-	 * mark_bootmem() gets upset.
-	 */
+	/* Ensure that total memory size is page-aligned. */
 	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
 	memblock_enforce_memory_limit(limit);

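For reference, ALIGN(..., PAGE_SIZE) in the hunk above rounds the total memory size up to the next page boundary before the limit is enforced. A minimal userspace sketch of that round-up arithmetic (ALIGN_UP and mem_size are illustrative names, not from this patch):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
/* Round x up to the next multiple of the power-of-two boundary a. */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	uint64_t mem_size = (1ULL << 30) + 123;	/* deliberately not page-aligned */
	uint64_t limit = ALIGN_UP(mem_size, PAGE_SIZE);

	printf("limit = %llu (page-aligned: %s)\n",
	       (unsigned long long)limit,
	       limit % PAGE_SIZE == 0 ? "yes" : "no");
	return 0;
}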

@@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 }
 
 /*
- * Call early during boot, before mem init or bootmem, to retrieve the RTAS
- * informations from the device-tree and allocate the RMO buffer for userland
+ * Call early during boot, before mem init, to retrieve the RTAS
+ * information from the device-tree and allocate the RMO buffer for userland
  * accesses.
  */
 void __init rtas_initialize(void)


@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt);
  * kvm_cma_reserve() - reserve area for kvm hash pagetable
  *
  * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
+ * called by arch specific code once the memblock allocator
  * has been activated and all other subsystems have already allocated/reserved
  * memory.
  */


@@ -276,7 +276,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 #ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
  */
 void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 {
@@ -399,7 +399,7 @@ void __init reserve_hugetlb_gpages(void)
 #else /* !PPC_FSL_BOOK3E */
 /* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
  */
 void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 {

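As the comments above describe, add_gpage() records the physical addresses of gigantic pages in an early-boot array so the hugetlb code can claim them once the buddy allocator is up. A rough standalone sketch of that bookkeeping (record_gpage, gpage_list and nr_gpages are illustrative names, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define MAX_GPAGES	64

static uint64_t gpage_list[MAX_GPAGES];
static unsigned int nr_gpages;

/* Record 'number_of_pages' gigantic pages starting at physical 'addr'. */
static void record_gpage(uint64_t addr, uint64_t page_size,
			 unsigned long number_of_pages)
{
	while (number_of_pages-- && nr_gpages < MAX_GPAGES) {
		gpage_list[nr_gpages++] = addr;
		addr += page_size;
	}
}

int main(void)
{
	record_gpage(1ULL << 34, 1ULL << 34, 2);	/* two 16GB pages */
	printf("recorded %u gigantic page(s)\n", nr_gpages);
	return 0;
}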

@@ -109,10 +109,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
						  __pgprot(flags)));
 	} else {
 #ifdef CONFIG_PPC_MMU_NOHASH
-		/* Warning ! This will blow up if bootmem is not initialized
-		 * which our ppc64 code is keen to do that, we'll need to
-		 * fix it and/or be more careful
-		 */
 		pgdp = pgd_offset_k(ea);
 #ifdef PUD_TABLE_SIZE
 		if (pgd_none(*pgdp)) {