swiotlb: remove SWIOTLB_MAP_ERROR

We can use DMA_MAPPING_ERROR instead, which already maps to the same
value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
This commit is contained in:
Christoph Hellwig 2018-12-03 11:42:52 +01:00
parent e5361ca29f
commit b907e20508
3 changed files with 4 additions and 7 deletions

View File

@@ -403,7 +403,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
-	if (map == SWIOTLB_MAP_ERROR)
+	if (map == DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 	dev_addr = xen_phys_to_bus(map);
@@ -572,7 +572,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
						 sg_phys(sg),
						 sg->length,
						 dir, attrs);
-		if (map == SWIOTLB_MAP_ERROR) {
+		if (map == DMA_MAPPING_ERROR) {
			dev_warn(hwdev, "swiotlb buffer is full\n");
			/* Don't panic here, we expect map_sg users
			   to do proper error handling. */

View File

@@ -46,9 +46,6 @@ enum dma_sync_target {
 	SYNC_FOR_DEVICE = 1,
 };

-/* define the last possible byte of physical address space as a mapping error */
-#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
-
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					   dma_addr_t tbl_dma_addr,
					   phys_addr_t phys, size_t size,

View File

@@ -526,7 +526,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
 		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
-	return SWIOTLB_MAP_ERROR;
+	return DMA_MAPPING_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -637,7 +637,7 @@ static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
				       *phys, size, dir, attrs);
-	if (*phys == SWIOTLB_MAP_ERROR)
+	if (*phys == DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 	/* Ensure that the address returned is DMA'ble */