spelling fixes: mm/
Spelling fixes in mm/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
parent 676b1855de
commit 183ff22bb6
@@ -1017,7 +1017,7 @@ static long region_chg(struct list_head *head, long f, long t)
 
 	/* If we are below the current region then a new region is required.
 	 * Subtle, allocate a new region at the position but make it zero
-	 * size such that we can guarentee to record the reservation. */
+	 * size such that we can guarantee to record the reservation. */
 	if (&rg->link == head || t < rg->from) {
 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 		if (!nrg)
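
For context, the reservation records that region_chg() walks are, in this era of the hugetlb code, simple list nodes; a sketch of the structure the comment's zero-size trick is about (the field comments are ours):

	struct file_region {
		struct list_head link;	/* position in the sorted reservation list */
		long from;		/* first page offset covered */
		long to;		/* one past the last offset; from == to is the
					 * zero-size placeholder the comment describes */
	};
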
@@ -2713,7 +2713,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		return 0;
 
 	down_read(&mm->mmap_sem);
-	/* ignore errors, just check how much was sucessfully transfered */
+	/* ignore errors, just check how much was successfully transferred */
 	while (len) {
 		int bytes, ret, offset;
 		void *maddr;
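
access_process_vm() is the helper behind ptrace peek/poke. A minimal caller sketch, assuming child is a valid task_struct pointer and addr a user address in that task:

	unsigned long word;
	int copied;

	/* write=0: read from the remote address space */
	copied = access_process_vm(child, addr, &word, sizeof(word), 0);
	if (copied != sizeof(word))
		return -EIO;	/* range was not fully mapped */
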
@@ -121,7 +121,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 		err = __add_section(zone, i << PFN_SECTION_SHIFT);
 
 		/*
-		 * EEXIST is finally dealed with by ioresource collision
+		 * EEXIST is finally dealt with by ioresource collision
 		 * check. see add_memory() => register_memory_resource()
 		 * Warning will be printed if there is collision.
 		 */
@@ -299,7 +299,7 @@ EXPORT_SYMBOL(mempool_free_slab);
 
 /*
  * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
- * specfied by pool_data
+ * specified by pool_data
  */
 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
 {
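
mempool_kmalloc()/mempool_kfree() exist to be handed to mempool_create(), which is where pool_data comes from; the element size rides along as a casted pointer. A sketch with arbitrary numbers (16 elements of 256 bytes):

	mempool_t *pool;

	/* pool_data carries the kmalloc size for both callbacks */
	pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
			      (void *)(unsigned long)256);
	if (pool) {
		void *buf = mempool_alloc(pool, GFP_KERNEL);

		mempool_free(buf, pool);
		mempool_destroy(pool);
	}
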
@@ -989,7 +989,7 @@ int __set_page_dirty_no_writeback(struct page *page)
  * mapping is pinned by the vma's ->vm_file reference.
  *
  * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() insode tree_lock.
+ * mapping by re-checking page_mapping() inside tree_lock.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
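
The re-check the fixed comment refers to looks roughly like this inside __set_page_dirty_nobuffers() (a sketch against the rwlock-based tree_lock of this era; the second page_mapping() lookup is the point):

	write_lock_irq(&mapping->tree_lock);
	mapping2 = page_mapping(page);
	if (mapping2) {
		/* not truncated meanwhile: safe to tag the tree */
		radix_tree_tag_set(&mapping->page_tree,
				   page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
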
@@ -123,7 +123,7 @@ static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
-   * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
+   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
    * ranges of memory (RAM) that may be registered with add_active_range().
    * Ranges passed to add_active_range() will be merged if possible
    * so the number of times add_active_range() can be called is
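
add_active_range(), which this comment keeps referring to, is called by arch code during early boot, once per span of RAM; a sketch (node 0 and the PFN bounds are placeholders):

	/* overlapping/adjacent calls for one node are merged, which is
	 * why MAX_ACTIVE_REGIONS only counts distinct ranges */
	add_active_range(0, start_pfn, end_pfn);
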
@@ -1260,7 +1260,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  * skip over zones that are not allowed by the cpuset, or that have
  * been recently (in last second) found to be nearly full. See further
  * comments in mmzone.h. Reduces cache footprint of zonelist scans
- * that have to skip over alot of full or unallowed zones.
+ * that have to skip over a lot of full or unallowed zones.
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
@@ -2358,7 +2358,7 @@ void build_all_zonelists(void)
 		__build_all_zonelists(NULL);
 		cpuset_init_current_mems_allowed();
 	} else {
-		/* we have to stop all cpus to guaranntee there is no user
+		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
@@ -2864,7 +2864,7 @@ static int __meminit first_active_region_index_in_nid(int nid)
 
 /*
  * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardles of node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
 static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
@@ -34,7 +34,7 @@
 * Radix priority search tree for address_space->i_mmap
 *
 * For each vma that map a unique set of file pages i.e., unique [radix_index,
- * heap_index] value, we have a corresponing priority search tree node. If
+ * heap_index] value, we have a corresponding priority search tree node. If
 * multiple vmas have identical [radix_index, heap_index] value, then one of
 * them is used as a tree node and others are stored in a vm_set list. The tree
 * node points to the first vma (head) of the list using vm_set.head.
@@ -26,7 +26,7 @@
 * initialized objects.
 *
 * This means, that your constructor is used only for newly allocated
- * slabs and you must pass objects with the same intializations to
+ * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
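
What the "same initializations" requirement means in practice: the constructor runs when a slab page is created, not on every allocation, so objects must be returned to the cache in ctor state. A sketch (struct thing is hypothetical, and the ctor prototype has varied across kernel versions; shown in its later one-argument form):

	static void thing_ctor(void *obj)
	{
		struct thing *t = obj;

		/* freed objects must look exactly like this again */
		spin_lock_init(&t->lock);
		INIT_LIST_HEAD(&t->list);
	}

	cache = kmem_cache_create("thing_cache", sizeof(struct thing),
				  0, SLAB_HWCACHE_ALIGN, thing_ctor);
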
@@ -1369,7 +1369,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
-		/* fall thru */
+		/* fall through */
 #endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
@@ -3806,7 +3806,7 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 or resizes varioius caches for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
 */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
@@ -5,7 +5,7 @@
 */
 
 /*
- * This file contains the default values for the opereation of the
+ * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
@@ -247,7 +247,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
 /**
- * get_vm_area - reserve a contingous kernel virtual area
+ * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
@@ -303,7 +303,7 @@ static struct vm_struct *__remove_vm_area(void *addr)
 }
 
 /**
- * remove_vm_area - find and remove a contingous kernel virtual area
+ * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
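
These two helpers pair up in ioremap-style code: reserve a virtually contiguous window of kernel address space, map into it, and tear it down later. A sketch (size is a placeholder):

	struct vm_struct *area, *removed;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	/* ... install page table entries covering [area->addr, +size) ... */

	/* teardown: remove_vm_area() unlinks and hands back the
	 * vm_struct, which the caller frees */
	removed = remove_vm_area(area->addr);
	kfree(removed);
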
@@ -364,7 +364,7 @@ static void __vunmap(void *addr, int deallocate_pages)
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
- * Free the virtually contiguous memory area starting at @addr, as
+ * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
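
And the allocation side of the same interface, matching the @addr semantics documented above:

	void *buf = vmalloc(len);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	vfree(buf);	/* vfree(NULL) would be a no-op, per the comment */
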
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(unregister_shrinker);
 * percentages of the lru and ageable caches. This should balance the seeks
 * generated by these structures.
 *
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increase the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.