forked from luck/tmp_suning_uos_patched
dma-mapping: remove leftover NULL device support
Most dma_map_ops implementations already had some issues with a NULL device, or did simply crash if one was fed to them. Now that we have cleaned up all the obvious offenders we can stop to pretend we support this mode. Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
5ab6a91a1c
commit
d7e02a9312
|
@@ -365,13 +365,12 @@ __get_free_pages() (but takes size instead of a page order).  If your
 driver needs regions sized smaller than a page, you may prefer using
 the dma_pool interface, described below.

-The consistent DMA mapping interfaces, for non-NULL dev, will by
-default return a DMA address which is 32-bit addressable.  Even if the
-device indicates (via DMA mask) that it may address the upper 32-bits,
-consistent allocation will only return > 32-bit addresses for DMA if
-the consistent DMA mask has been explicitly changed via
-dma_set_coherent_mask().  This is true of the dma_pool interface as
-well.
+The consistent DMA mapping interfaces, will by default return a DMA address
+which is 32-bit addressable.  Even if the device indicates (via the DMA mask)
+that it may address the upper 32-bits, consistent allocation will only
+return > 32-bit addresses for DMA if the consistent DMA mask has been
+explicitly changed via dma_set_coherent_mask().  This is true of the
+dma_pool interface as well.

 dma_alloc_coherent() returns two values: the virtual address which you
 can use to access it from the CPU and dma_handle which you pass to the
@@ -267,9 +267,9 @@ size_t dma_direct_max_mapping_size(struct device *dev);

 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->dma_ops)
+	if (dev->dma_ops)
 		return dev->dma_ops;
-	return get_arch_dma_ops(dev ? dev->bus : NULL);
+	return get_arch_dma_ops(dev->bus);
 }

 static inline void set_dma_ops(struct device *dev,
@@ -650,7 +650,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,

 static inline u64 dma_get_mask(struct device *dev)
 {
-	if (dev && dev->dma_mask && *dev->dma_mask)
+	if (dev->dma_mask && *dev->dma_mask)
 		return *dev->dma_mask;
 	return DMA_BIT_MASK(32);
 }
@@ -311,7 +311,7 @@ static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
 		size_t size)
 {
 	return swiotlb_force != SWIOTLB_FORCE &&
-		(!dev || dma_capable(dev, dma_addr, size));
+		dma_capable(dev, dma_addr, size);
 }

 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
Loading…
Reference in New Issue
Block a user