x86: convert CPA users to the new set_page_ API

This patch converts various users of change_page_attr() to the new, more
intent-driven set_page_*/set_memory_* API set.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 6d238cc4dc
parent 75cbade8ea
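For readers skimming the hunks below, the conversion pattern is mechanical. The sketch that follows summarizes the old calls and their intent-driven replacements as they appear in this patch; it is illustrative only, and `page`, `addr` and `numpages` are placeholder variables, not code taken from the patch:

	/* Old interface: callers picked a pgprot_t and passed it in. */
	change_page_attr(page, numpages, PAGE_KERNEL_NOCACHE);
	change_page_attr_addr(addr, numpages, PAGE_KERNEL_RO);

	/* New interface: one helper per intent, operating either on a
	 * struct page * (set_pages_*) or on a kernel virtual address
	 * (set_memory_*). */
	set_pages_uc(page, numpages);	/* uncached;	was PAGE_KERNEL_NOCACHE */
	set_pages_wb(page, numpages);	/* write-back;	was PAGE_KERNEL */
	set_pages_ro(page, numpages);	/* read-only;	was PAGE_KERNEL_RO/RX */
	set_pages_rw(page, numpages);	/* read-write */
	set_memory_uc(addr, numpages);	/* same intents, keyed by address */
	set_memory_ro(addr, numpages);
	set_memory_rw(addr, numpages);
	set_memory_x(addr, numpages);	/* executable */

	/* Both old and new interfaces leave the TLB flush to the caller,
	 * hence the unchanged global_flush_tlb() calls in the hunks below. */
	global_flush_tlb();

The new helpers also return an error code on failure, which the GATT hunk relies on in the same way the old change_page_attr_addr() call did.
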
@@ -396,10 +396,10 @@ static void __init runtime_code_page_mkexec(void)
 		md = p;
 		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
 		if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-		    (end >> PAGE_SHIFT) <= max_pfn_mapped)
-			change_page_attr_addr(md->virt_addr,
-					      md->num_pages,
-					      PAGE_KERNEL_EXEC_NOCACHE);
+		    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
+			set_memory_x(md->virt_addr, md->num_pages);
+			set_memory_uc(md->virt_addr, md->num_pages);
+		}
 	}
 	__flush_tlb_all();
 }
@@ -570,8 +570,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
 	if (!gatt)
 		panic("Cannot allocate GATT table");
-	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
-				  PAGE_KERNEL_NOCACHE))
+	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
 		panic("Could not set GART PTEs to uncacheable pages");
 	global_flush_tlb();
 
@@ -770,34 +770,30 @@ void mark_rodata_ro(void)
 	if (num_possible_cpus() <= 1)
 #endif
 	{
-		change_page_attr(virt_to_page(start),
-				 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 		printk("Write protecting the kernel text: %luk\n", size >> 10);
 
 #ifdef CONFIG_CPA_DEBUG
 		global_flush_tlb();
 
 		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
-		change_page_attr(virt_to_page(start), size>>PAGE_SHIFT,
-				 PAGE_KERNEL_EXEC);
+		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
 		global_flush_tlb();
 
 		printk("Testing CPA: write protecting again\n");
-		change_page_attr(virt_to_page(start), size>>PAGE_SHIFT,
-				 PAGE_KERNEL_RX);
+		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 		global_flush_tlb();
 #endif
 	}
 #endif
 	start += size;
 	size = (unsigned long)__end_rodata - start;
-	change_page_attr(virt_to_page(start),
-			 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 	printk("Write protecting the kernel read-only data: %luk\n",
 	       size >> 10);
 
 	/*
-	 * change_page_attr() requires a global_flush_tlb() call after it.
+	 * set_pages_*() requires a global_flush_tlb() call after it.
 	 * We do this after the printk so that if something went wrong in the
 	 * change, the printk gets out at least to give a better debug hint
 	 * of who is the culprit.
@@ -806,13 +802,11 @@ void mark_rodata_ro(void)
 
 #ifdef CONFIG_CPA_DEBUG
 	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
-	change_page_attr(virt_to_page(start), size >> PAGE_SHIFT,
-			 PAGE_KERNEL);
+	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
 	global_flush_tlb();
 
 	printk("Testing CPA: write protecting again\n");
-	change_page_attr(virt_to_page(start), size >> PAGE_SHIFT,
-			 PAGE_KERNEL_RO);
+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 	global_flush_tlb();
 #endif
 }
@@ -556,8 +556,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		init_page_count(virt_to_page(addr));
 		memset((void *)(addr & ~(PAGE_SIZE-1)),
 			POISON_FREE_INITMEM, PAGE_SIZE);
-		if (addr >= __START_KERNEL_map)
-			change_page_attr_addr(addr, 1, __pgprot(0));
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -594,13 +592,13 @@ void mark_rodata_ro(void)
 	if (end <= start)
 		return;
 
-	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 	       (end - start) >> 10);
 
 	/*
-	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
+	 * set_memory_*() requires a global_flush_tlb() call after it.
 	 * We do this after the printk so that if something went wrong in the
 	 * change, the printk gets out at least to give a better debug hint
 	 * of who is the culprit.
@@ -609,11 +607,11 @@ void mark_rodata_ro(void)
 
 #ifdef CONFIG_CPA_DEBUG
 	printk("Testing CPA: undo %lx-%lx\n", start, end);
-	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL);
+	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
 	global_flush_tlb();
 
 	printk("Testing CPA: again\n");
-	change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 	global_flush_tlb();
 #endif
 }
@@ -210,8 +210,8 @@ static void *i8xx_alloc_pages(void)
 	if (page == NULL)
 		return NULL;
 
-	if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
-		change_page_attr(page, 4, PAGE_KERNEL);
+	if (set_pages_uc(page, 4) < 0) {
+		set_pages_wb(page, 4);
 		global_flush_tlb();
 		__free_pages(page, 2);
 		return NULL;
@@ -230,7 +230,7 @@ static void i8xx_destroy_pages(void *addr)
 		return;
 
 	page = virt_to_page(addr);
-	change_page_attr(page, 4, PAGE_KERNEL);
+	set_pages_wb(page, 4);
 	global_flush_tlb();
 	put_page(page);
 	__free_pages(page, 2);
@@ -88,9 +88,7 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
 {
 	gfp_t flags;
 	unsigned long i;
-	pgprot_t wc_pageprot;
 
-	wc_pageprot = PAGE_KERNEL_NOCACHE;
 	max_order++;
 	do {
 		/*
@@ -131,8 +129,7 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
 	 */
 
 	global_flush_tlb();
-	change_page_attr(virt_to_page(va->logical), va->size >> PAGE_SHIFT,
-			 wc_pageprot);
+	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
 	global_flush_tlb();
 
 	printk(KERN_DEBUG MODULE_NAME
@@ -157,8 +154,8 @@ static void vmlfb_free_vram_area(struct vram_area *va)
 		 * Reset the linear kernel map caching policy.
 		 */
 
-		change_page_attr(virt_to_page(va->logical),
-				 va->size >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pages_wb(virt_to_page(va->logical),
+			     va->size >> PAGE_SHIFT);
 		global_flush_tlb();
 
 		/*
@@ -16,8 +16,8 @@
  * Caller's responsibility to call global_flush_tlb() for performance
  * reasons
  */
-#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
-#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+#define map_page_into_agp(page) set_pages_uc(page, 1)
+#define unmap_page_from_agp(page) set_pages_wb(page, 1)
 #define flush_agp_mappings() global_flush_tlb()
 
 /*
@@ -711,11 +711,14 @@ static void snd_intel8x0_setup_periods(struct intel8x0 *chip, struct ichdev *ich
 static void fill_nocache(void *buf, int size, int nocache)
 {
 	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	change_page_attr(virt_to_page(buf), size, nocache ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL);
+	if (nocache)
+		set_pages_uc(virt_to_page(buf), size);
+	else
+		set_pages_wb(virt_to_page(buf), size);
 	global_flush_tlb();
 }
 #else
-#define fill_nocache(buf,size,nocache)
+#define fill_nocache(buf, size, nocache) do { ; } while (0)
 #endif
 
 /*