dma-direct: remove __dma_to_phys
There is no harm in just always clearing the SME encryption bit, while
significantly simplifying the interface.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>

parent 96eb89caf7
commit 7bc5c428a6
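
At a glance (a sketch reassembled from the hunks below, not a standalone build unit): before this commit the generic dma_to_phys() was a wrapper that applied __sme_clr() on top of each architecture's raw __dma_to_phys(); afterwards the architectures provide dma_to_phys() directly and the generic fallback clears the SME encryption bit unconditionally.

	/* Before: generic wrapper, removed by this commit. */
	static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
	{
		return __sme_clr(__dma_to_phys(dev, daddr));
	}

	/* After: generic fallback with __sme_clr() folded in.
	 * (dev->dma_pfn_offset, PAGE_SHIFT and __sme_clr() come from
	 * the usual kernel headers; the arch overrides below keep their
	 * translation logic and just lose the leading underscores.) */
	static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
	{
		phys_addr_t paddr = (phys_addr_t)dev_addr +
			((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);

		return __sme_clr(paddr);
	}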

@@ -8,7 +8,7 @@ static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
 }
 
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 {
 	unsigned int offset = dev_addr & ~PAGE_MASK;
 	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;

@@ -52,7 +52,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
 	return pa;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	struct bmips_dma_range *r;
 

@@ -177,7 +177,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return paddr;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 #ifdef CONFIG_PCI
 	if (dev && dev_is_pci(dev))

@@ -3,6 +3,6 @@
 #define _MIPS_DMA_DIRECT_H 1
 
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 
 #endif /* _MIPS_DMA_DIRECT_H */

@@ -6,7 +6,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return paddr | 0x80000000;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr & 0x7fffffff;
 }

@@ -6,7 +6,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return paddr | 0x80000000;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	if (dma_addr > 0x8fffffff)
 		return dma_addr;

@@ -13,7 +13,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return ((nid << 44) ^ paddr) | (nid << node_id_offset);
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 	/* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
 	 * Loongson-3's 48bit address space and embed it into 40bit */

@@ -175,7 +175,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return paddr + ar2315_dev_offset(dev);
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr - ar2315_dev_offset(dev);
 }

@@ -33,7 +33,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return bc->baddr + paddr;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr & ~(0xffUL << 56);
 }

@@ -27,7 +27,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return dma_addr;
 }
 
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
 

@@ -7,7 +7,7 @@ static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return paddr + dev->archdata.dma_offset;
 }
 
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 	return daddr - dev->archdata.dma_offset;
 }

@@ -24,11 +24,12 @@ static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
 }
 
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 {
-	phys_addr_t paddr = (phys_addr_t)dev_addr;
+	phys_addr_t paddr = (phys_addr_t)dev_addr +
+		((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
 
-	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+	return __sme_clr(paddr);
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 

@@ -44,7 +45,7 @@ static inline bool force_dma_unencrypted(struct device *dev)
 /*
  * If memory encryption is supported, phys_to_dma will set the memory encryption
  * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
- * and __dma_to_phys versions should only be used on non-encrypted memory for
+ * version should only be used on non-encrypted memory for
  * special occasions like DMA coherent buffers.
  */
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
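
The comment above pins down the contract: phys_to_dma() applies the memory-encryption bit and dma_to_phys() strips it, so the pair is expected to round-trip. As a hedged illustration (dma_addr_round_trips() is a hypothetical helper, not part of the patch):

	/* Hypothetical check of the invariant described in the comment
	 * above: setting the SME bit on the way out and clearing it on
	 * the way back must return the original physical address. */
	static bool dma_addr_round_trips(struct device *dev, phys_addr_t paddr)
	{
		dma_addr_t dma_addr = phys_to_dma(dev, paddr);	/* __sme_set() */

		return dma_to_phys(dev, dma_addr) == paddr;	/* __sme_clr() */
	}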

@@ -52,11 +53,6 @@ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return __sme_set(__phys_to_dma(dev, paddr));
 }
 
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-	return __sme_clr(__dma_to_phys(dev, daddr));
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 		bool is_ram)
 {

@@ -48,11 +48,6 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 {
 	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
 
-	if (force_dma_unencrypted(dev))
-		*phys_limit = __dma_to_phys(dev, dma_limit);
-	else
-		*phys_limit = dma_to_phys(dev, dma_limit);
-
 	/*
 	 * Optimistically try the zone that the physical address mask falls
 	 * into first. If that returns memory that isn't actually addressable

@@ -61,6 +56,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
 	 * zones.
 	 */
+	*phys_limit = dma_to_phys(dev, dma_limit);
 	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
 	if (*phys_limit <= DMA_BIT_MASK(32))
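
Pieced together, the two hunks above leave dma_direct_optimal_gfp_mask() with a single, unconditional limit computation. A sketch of the resulting flow (the trailing GFP_DMA32/0 returns are assumed from context, since the hunk ends at the 32-bit test):

	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/* No force_dma_unencrypted() special case any more:
	 * dma_to_phys() now clears the encryption bit itself. */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;	/* assumed: the hunk ends at the test above */
	return 0;			/* assumed */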