swiotlb: add common swiotlb_map_ops
Currently all architectures that want to use swiotlb have to implement their own dma_map_ops instances. Provide a generic one based on the x86 implementation which first calls into dma_direct to try a full-blown direct mapping implementation (including e.g. CMA) before falling back to allocating from the swiotlb buffer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian König <christian.koenig@amd.com>
commit 251533eb35 (parent 7f2c8bbd32)
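The point of the change is that an architecture can drop its private swiotlb dma_map_ops and point the generic DMA code at the common instance instead. A minimal sketch of what that adoption could look like (hypothetical arch code, assuming the per-architecture get_arch_dma_ops() hook of this era; not part of this commit):

	/* arch/example/include/asm/dma-mapping.h (hypothetical) */
	#include <linux/swiotlb.h>

	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
	{
		/* Use the common swiotlb ops added by this commit. */
		return &swiotlb_dma_ops;
	}

The architecture would also need to select CONFIG_DMA_DIRECT_OPS (and CONFIG_SWIOTLB) in its Kconfig, since the new ops are only built under that guard.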
include/linux/swiotlb.h

@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 				    enum dma_sync_target target);
 
 /* Accessory functions. */
+
+void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flags, unsigned long attrs);
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_addr, unsigned long attrs);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void);
 extern int is_swiotlb_buffer(phys_addr_t paddr);
 extern void swiotlb_set_max_segment(unsigned int);
 
+extern const struct dma_map_ops swiotlb_dma_ops;
+
 #endif /* __LINUX_SWIOTLB_H */
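These declarations are what the generic DMA API ends up calling once swiotlb_dma_ops is installed for a device. A short sketch of a consumer, using only the standard DMA API (hypothetical driver code, not from this commit):

	#include <linux/dma-mapping.h>

	/* Hypothetical driver path. dma_alloc_coherent() dispatches through
	 * the device's dma_map_ops, i.e. to swiotlb_alloc(), which tries a
	 * direct allocation first and falls back to the swiotlb bounce
	 * buffer only if that fails. */
	static int example_alloc(struct device *dev)
	{
		dma_addr_t handle;
		void *buf;

		buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... hand 'handle' to the device, use 'buf' from the CPU ... */

		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
		return 0;
	}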
lib/swiotlb.c

@@ -1087,3 +1087,46 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
+
+#ifdef CONFIG_DMA_DIRECT_OPS
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
+{
+	void *vaddr;
+
+	/*
+	 * Don't print a warning when the first allocation attempt fails.
+	 * swiotlb_alloc_coherent() will print a warning when the DMA memory
+	 * allocation ultimately failed.
+	 */
+	gfp |= __GFP_NOWARN;
+
+	vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+	if (!vaddr)
+		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+	return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_addr, unsigned long attrs)
+{
+	if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
+		swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+	else
+		dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+	.mapping_error		= swiotlb_dma_mapping_error,
+	.alloc			= swiotlb_alloc,
+	.free			= swiotlb_free,
+	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
+	.sync_single_for_device	= swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
+	.map_sg			= swiotlb_map_sg_attrs,
+	.unmap_sg		= swiotlb_unmap_sg_attrs,
+	.map_page		= swiotlb_map_page,
+	.unmap_page		= swiotlb_unmap_page,
+};
+#endif /* CONFIG_DMA_DIRECT_OPS */
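For context on how these ops get invoked: the core DMA API looks up the device's dma_map_ops and calls through it. A simplified sketch of the dispatch (paraphrased from the include/linux/dma-mapping.h of this era; the real helper also handles per-device coherent pools and arch hooks):

	static inline void *dma_alloc_attrs(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		/* With swiotlb_dma_ops installed, ->alloc is swiotlb_alloc(). */
		return ops->alloc(dev, size, dma_handle, flag, attrs);
	}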