dma-mapping: handle vmalloc addresses in dma_common_{mmap,get_sgtable}

[ Upstream commit 40ac971eab89330d6153e7721e88acd2d98833f9 ]

xen-swiotlb can use vmalloc-backed addresses for DMA coherent allocations
and uses the common helpers.  Properly handle them to unbreak Xen on
ARM platforms.

Fixes: 1b65c4e5a9 ("swiotlb-xen: use xen_alloc/free_coherent_pages")
Signed-off-by: Roman Skakun <roman_skakun@epam.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
[hch: split the patch, renamed the helpers]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
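
For context on the fix: virt_to_page() translates a kernel virtual address
with linear-map arithmetic alone, so for a vmalloc address (which lives in a
separately page-table-mapped region) it silently produces a bogus struct
page.  The patch below therefore adds a small helper that picks the correct
translation.  Shown here with explanatory comments (the code matches the
patch; the comments are editorial):

    #include <linux/mm.h>       /* is_vmalloc_addr(), virt_to_page() */
    #include <linux/vmalloc.h>  /* vmalloc_to_page() */

    static struct page *dma_common_vaddr_to_page(void *cpu_addr)
    {
            /* vmalloc addresses are outside the linear map; walk the
             * page tables to find the backing page. */
            if (is_vmalloc_addr(cpu_addr))
                    return vmalloc_to_page(cpu_addr);

            /* Linear-map address: offset arithmetic is valid. */
            return virt_to_page(cpu_addr);
    }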
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -5,6 +5,13 @@
  */
 #include <linux/dma-map-ops.h>
 
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+	if (is_vmalloc_addr(cpu_addr))
+		return vmalloc_to_page(cpu_addr);
+	return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = virt_to_page(cpu_addr);
+	struct page *page = dma_common_vaddr_to_page(cpu_addr);
 	int ret;
 
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
+	struct page *page = dma_common_vaddr_to_page(cpu_addr);
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	return remap_pfn_range(vma, vma->vm_start,
-			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+			page_to_pfn(page) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
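
Usage sketch (hypothetical, not from this patch): a driver whose coherent
buffer may be vmalloc-backed under xen-swiotlb on ARM, forwarding userspace
mmap to the common helper path fixed above.  All foo_* names are made up;
dma_mmap_coherent() is the standard kernel API, and it reaches
dma_common_mmap() when the device's dma_ops use the common helper, as
xen-swiotlb's do.

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    struct foo_dev {
            struct device *dev;     /* device the buffer was allocated for */
            void *cpu_addr;         /* from dma_alloc_coherent(); may be
                                     * vmalloc-backed under xen-swiotlb */
            dma_addr_t dma_handle;
            size_t size;
    };

    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct foo_dev *fd = file->private_data;

            /* Before this fix, a vmalloc-backed cpu_addr made the common
             * helper remap a bogus PFN into userspace. */
            return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
                                     fd->dma_handle, fd->size);
    }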