forked from luck/tmp_suning_uos_patched
ion: optimize ion_heap_buffer_zero
ion_heap_buffer_zero can spend a long time in unmap_kernel_range if it has to
broadcast a TLB flush to every CPU for every page. Modify it to batch pages
into a larger region to clear using a single mapping. This may cause the
mapping size to change if the buffer size is not a multiple of the mapping
size, so switch to allocating the address space for each chunk. This allows
us to use vm_map_ram to handle the allocation and mapping together. The
number of pages to zero using a single mapping is set to 32 to hit the
fastpath in vm_map_ram.

Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 8b312bb9a7
parent f0f0676382
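As a rough illustration of the pattern the patch adopts (not part of the kernel change below), the following user-space C sketch batches pages into groups of 32 and clears each full group in one pass, plus the trailing partial batch. The zero_batch() helper, BATCH constant, and malloc()'d buffers are hypothetical stand-ins for ion_heap_clear_pages(), ARRAY_SIZE(pages), and the scatterlist pages; in the kernel, each batch is mapped with vm_map_ram(), memset, and unmapped with vm_unmap_ram().

/*
 * Illustrative user-space sketch, not kernel code: zero a set of pages in
 * fixed-size batches, mirroring the control flow introduced by the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define BATCH     32	/* pages cleared per "mapping", as in the patch */

/* Stand-in for ion_heap_clear_pages(): clear one batch in a single pass.
 * In the kernel this is vm_map_ram() + memset() + vm_unmap_ram(); here each
 * page already has a valid address, so it is simply memset directly. */
static int zero_batch(unsigned char **pages, int num)
{
	for (int i = 0; i < num; i++)
		memset(pages[i], 0, PAGE_SIZE);
	return 0;
}

/* Zero npages pages, collecting them BATCH at a time. */
static int zero_all(unsigned char **pages, int npages)
{
	unsigned char *batch[BATCH];
	int k = 0, ret = 0;

	for (int j = 0; j < npages; j++) {
		batch[k++] = pages[j];
		if (k == BATCH) {		/* full batch: clear it in one go */
			ret = zero_batch(batch, k);
			if (ret)
				return ret;
			k = 0;
		}
	}
	if (k)					/* trailing partial batch */
		ret = zero_batch(batch, k);
	return ret;
}

int main(void)
{
	enum { NPAGES = 100 };			/* deliberately not a multiple of BATCH */
	unsigned char *pages[NPAGES];

	for (int i = 0; i < NPAGES; i++) {
		pages[i] = malloc(PAGE_SIZE);
		memset(pages[i], 0xff, PAGE_SIZE);
	}
	if (zero_all(pages, NPAGES) == 0)
		printf("zeroed %d pages in batches of %d\n", NPAGES, BATCH);
	for (int i = 0; i < NPAGES; i++)
		free(pages[i]);
	return 0;
}

Clearing 32 pages per mapping amortizes the map/unmap cost (and any TLB maintenance) over the whole batch instead of paying it per page, which is the point of the patch.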
@@ -100,40 +100,48 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 	return 0;
 }
 
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+	void *addr = vm_map_ram(pages, num, -1, pgprot);
+	if (!addr)
+		return -ENOMEM;
+	memset(addr, 0, PAGE_SIZE * num);
+	vm_unmap_ram(addr, num);
+
+	return 0;
+}
+
 int ion_heap_buffer_zero(struct ion_buffer *buffer)
 {
 	struct sg_table *table = buffer->sg_table;
 	pgprot_t pgprot;
 	struct scatterlist *sg;
-	struct vm_struct *vm_struct;
 	int i, j, ret = 0;
+	struct page *pages[32];
+	int k = 0;
 
 	if (buffer->flags & ION_FLAG_CACHED)
 		pgprot = PAGE_KERNEL;
 	else
 		pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-	if (!vm_struct)
-		return -ENOMEM;
-
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		struct page *page = sg_page(sg);
 		unsigned long len = sg->length;
 
 		for (j = 0; j < len / PAGE_SIZE; j++) {
-			struct page *sub_page = page + j;
-			struct page **pages = &sub_page;
-			ret = map_vm_area(vm_struct, pgprot, &pages);
-			if (ret)
-				goto end;
-			memset(vm_struct->addr, 0, PAGE_SIZE);
-			unmap_kernel_range((unsigned long)vm_struct->addr,
-					   PAGE_SIZE);
+			pages[k++] = page + j;
+			if (k == ARRAY_SIZE(pages)) {
+				ret = ion_heap_clear_pages(pages, k, pgprot);
+				if (ret)
+					goto end;
+				k = 0;
+			}
 		}
+		if (k)
+			ret = ion_heap_clear_pages(pages, k, pgprot);
 	}
 end:
-	free_vm_area(vm_struct);
 	return ret;
 }
 