microblaze: Fix mmap for cache coherent memory
When running in a non-cache-coherent configuration, the memory that was allocated with dma_alloc_coherent() has a custom mapping, so there is no 1-to-1 relationship between the kernel virtual address and the PFN. This means that virt_to_pfn() will not work correctly for those addresses, and the default mmap implementation, dma_common_mmap(), will map some arbitrary memory area rather than the one requested. Fix this by providing a custom mmap implementation that looks up the PFN from the page table instead of using virt_to_pfn().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
parent b2776bf714
commit 3a8e326517
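For context, the path this patch fixes is exercised when a driver exports a dma_alloc_coherent() buffer to userspace via dma_mmap_coherent(), which dispatches to the architecture's dma_map_ops->mmap hook. A minimal sketch of such a caller follows; the mydev driver, its fields, and its device node are hypothetical and not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: a buffer obtained from dma_alloc_coherent(). */
struct mydev {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *priv = file->private_data;

	/*
	 * Dispatches to dma_map_ops->mmap, i.e. to dma_direct_mmap_coherent()
	 * on microblaze once this patch is applied.  Before the patch, the
	 * generic dma_common_mmap() fallback was used, whose linear-map
	 * arithmetic maps the wrong pages in the non-cache-coherent case.
	 */
	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_handle, priv->size);
}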
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -565,6 +565,7 @@ void consistent_free(size_t size, void *vaddr);
 void consistent_sync(void *vaddr, size_t size, int direction);
 void consistent_sync_page(struct page *page, unsigned long offset,
 			  size_t size, int direction);
+unsigned long consistent_virt_to_pfn(void *vaddr);
 
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
 		__dma_sync(sg->dma_address, sg->length, direction);
 }
 
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+			     void *cpu_addr, dma_addr_t handle, size_t size,
+			     struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+
+	if (off >= count || user_count > (count - off))
+		return -ENXIO;
+
+#ifdef NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = consistent_virt_to_pfn(cpu_addr);
+#else
+	pfn = virt_to_pfn(cpu_addr);
+#endif
+	return remap_pfn_range(vma, vma->vm_start, pfn + off,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+#else
+	return -ENXIO;
+#endif
+}
+
 struct dma_map_ops dma_direct_ops = {
 	.alloc		= dma_direct_alloc_coherent,
 	.free		= dma_direct_free_coherent,
+	.mmap		= dma_direct_mmap_coherent,
 	.map_sg		= dma_direct_map_sg,
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
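Seen from userspace, vma->vm_pgoff above is the (page-granular) offset argument of mmap(2), count is the page count of the PAGE_ALIGN()ed allocation, and the bounds check rejects any window that does not fit inside it. A hedged usage sketch against a hypothetical /dev/mydev node, assuming its driver forwards to dma_mmap_coherent() as sketched earlier:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/mydev", O_RDWR);	/* hypothetical device node */

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * Map 2 pages starting 1 page into the buffer: the kernel sees
	 * vm_pgoff == 1 and user_count == 2.  For, say, a 3-page buffer
	 * (count == 3), off < count and user_count <= count - off both
	 * hold, so dma_direct_mmap_coherent() accepts the request; a
	 * 3-page window at the same offset would fail with -ENXIO.
	 */
	void *p = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, 1 * pg);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	munmap(p, 2 * pg);
	close(fd);
	return EXIT_SUCCESS;
}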
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -156,6 +156,25 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 }
 EXPORT_SYMBOL(consistent_alloc);
 
+#ifdef CONFIG_MMU
+static pte_t *consistent_virt_to_pte(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
+}
+
+unsigned long consistent_virt_to_pfn(void *vaddr)
+{
+	pte_t *ptep = consistent_virt_to_pte(vaddr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+
+	return pte_pfn(*ptep);
+}
+#endif
+
 /*
  * free page(s) as defined by the above mapping.
  */
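The page-table walk is what makes the fix work: with NOT_COHERENT_CACHE, consistent_alloc() returns an uncached remapping rather than a lowmem linear-map address, so virt_to_pfn()'s linear arithmetic lands on the wrong page, while reading the PTE always yields the real PFN. An illustrative contrast of the two lookups (a sketch only, simplifying the real microblaze virt_to_pfn() macro):

#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Linear-map arithmetic: effectively __pa(vaddr) >> PAGE_SHIFT.  Valid
 * only for addresses in the kernel's direct mapping; for a buffer that
 * consistent_alloc() remapped elsewhere it yields an unrelated PFN.
 */
static unsigned long pfn_by_arithmetic(void *vaddr)
{
	return virt_to_pfn(vaddr);
}

/*
 * Page-table walk: follow pgd -> pmd -> pte for the kernel address and
 * read the PFN out of the PTE.  Correct for any mapped kernel address,
 * which is exactly what consistent_virt_to_pfn() above relies on.
 */
static unsigned long pfn_by_walk(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;
	pte_t *ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr),
					addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;

	return pte_pfn(*ptep);
}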
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -181,13 +200,9 @@ void consistent_free(size_t size, void *vaddr)
 	} while (size -= PAGE_SIZE);
 #else
 	do {
-		pte_t *ptep;
+		pte_t *ptep = consistent_virt_to_pte(vaddr);
 		unsigned long pfn;
 
-		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
-						(unsigned int)vaddr),
-					(unsigned int)vaddr),
-				(unsigned int)vaddr);
 		if (!pte_none(*ptep) && pte_present(*ptep)) {
 			pfn = pte_pfn(*ptep);
 			pte_clear(&init_mm, (unsigned int)vaddr, ptep);