forked from luck/tmp_suning_uos_patched
swiotlb: remove SWIOTLB_MAP_ERROR
We can use DMA_MAPPING_ERROR instead, which already maps to the same value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
This commit is contained in:
parent
e5361ca29f
commit
b907e20508
|
@@ -403,7 +403,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
|
||||||
|
|
||||||
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
|
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
|
||||||
attrs);
|
attrs);
|
||||||
if (map == SWIOTLB_MAP_ERROR)
|
if (map == DMA_MAPPING_ERROR)
|
||||||
return DMA_MAPPING_ERROR;
|
return DMA_MAPPING_ERROR;
|
||||||
|
|
||||||
dev_addr = xen_phys_to_bus(map);
|
dev_addr = xen_phys_to_bus(map);
|
||||||
|
@@ -572,7 +572,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
|
||||||
sg_phys(sg),
|
sg_phys(sg),
|
||||||
sg->length,
|
sg->length,
|
||||||
dir, attrs);
|
dir, attrs);
|
||||||
if (map == SWIOTLB_MAP_ERROR) {
|
if (map == DMA_MAPPING_ERROR) {
|
||||||
dev_warn(hwdev, "swiotlb buffer is full\n");
|
dev_warn(hwdev, "swiotlb buffer is full\n");
|
||||||
/* Don't panic here, we expect map_sg users
|
/* Don't panic here, we expect map_sg users
|
||||||
to do proper error handling. */
|
to do proper error handling. */
|
||||||
|
|
|
@@ -46,9 +46,6 @@ enum dma_sync_target {
|
||||||
SYNC_FOR_DEVICE = 1,
|
SYNC_FOR_DEVICE = 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* define the last possible byte of physical address space as a mapping error */
|
|
||||||
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
|
|
||||||
|
|
||||||
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
|
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
|
||||||
dma_addr_t tbl_dma_addr,
|
dma_addr_t tbl_dma_addr,
|
||||||
phys_addr_t phys, size_t size,
|
phys_addr_t phys, size_t size,
|
||||||
|
|
|
@@ -526,7 +526,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
|
||||||
spin_unlock_irqrestore(&io_tlb_lock, flags);
|
spin_unlock_irqrestore(&io_tlb_lock, flags);
|
||||||
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
|
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
|
||||||
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
|
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
|
||||||
return SWIOTLB_MAP_ERROR;
|
return DMA_MAPPING_ERROR;
|
||||||
found:
|
found:
|
||||||
spin_unlock_irqrestore(&io_tlb_lock, flags);
|
spin_unlock_irqrestore(&io_tlb_lock, flags);
|
||||||
|
|
||||||
|
@@ -637,7 +637,7 @@ static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
|
||||||
/* Oh well, have to allocate and map a bounce buffer. */
|
/* Oh well, have to allocate and map a bounce buffer. */
|
||||||
*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
|
*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
|
||||||
*phys, size, dir, attrs);
|
*phys, size, dir, attrs);
|
||||||
if (*phys == SWIOTLB_MAP_ERROR)
|
if (*phys == DMA_MAPPING_ERROR)
|
||||||
return DMA_MAPPING_ERROR;
|
return DMA_MAPPING_ERROR;
|
||||||
|
|
||||||
/* Ensure that the address returned is DMA'ble */
|
/* Ensure that the address returned is DMA'ble */
|
||||||
|
|
Loading…
Reference in New Issue
Block a user