commit 8c18fc6344

dma-mapping fixes for 5.8:

 - ensure we always have fully addressable memory in the dma coherent
   pool (Nicolas Saenz Julienne)

-----BEGIN PGP SIGNATURE-----
iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl8T+D0LHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYM6lRAAzDoUs32GJpawMANJWAde5DX3T5WUEWMLyGV0O2Ct
9Yzm3iDh25k5Lc8cr8l/UUpJL8B+uEkl/iW+GZQ6vvD3rxW0v5IfGwu8I4hqGiCo
BpbsRr1VVXl2dLyA6sk/fXLYSqqWBoYzVdtZyRwgek6JOvA3ALy1jv7EkrBsE/UP
6F6kWUTkDiek9ZAP1d0ztCTDGiuAQhAvmmO4odfMqMjDAIYW4fL4CPhDeMl4We66
HNg+OJEF/aK5VC6qiY3629K3aMB0ZDz4oQzSIUO2H7RjuVzVr9Ce7JmKa+lBDxlS
6e5GAfqoJbVz1C0oT46XT1IsMJKcDDgmfr+pmjgeSNt9HzvYND413opUFyyUvLIE
kpUHQUibMOFxHiHRGQeCJaGVLgF/ucSoBeLbMTDORMLOFbZTLgKN9CjiP8/RgUrc
jL6lKa8LX3nyTlHTSHH7FPyu5waG2cLfLexntPMGQenXjLOxmS9Jg1Q+MjihjxH/
tAfGoeoCjgILOjZQmpZ9Ze5nSdgnEwfHpYAYFQi981/HACUxjZrunjOTNMLqCxu1
cu+bi0HjAhdoQRMC1YtIcffWabPvWYp0R5WqVs3ExKpJKXRO5xjuVdybUOGpj1Py
uOWOtAGyOxD1vp51e37ZsrFO2q3J6bqUFSVMDyYUKoWlyHOS7cy4ULZHQXNpUAAq
9fA=
=H248
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.8-6' of git://git.infradead.org/users/hch/dma-mapping into master

Pull dma-mapping fixes from Christoph Hellwig:
 "Ensure we always have fully addressable memory in the dma coherent
  pool (Nicolas Saenz Julienne)"

* tag 'dma-mapping-5.8-6' of git://git.infradead.org/users/hch/dma-mapping:
  dma-pool: do not allocate pool memory from CMA
  dma-pool: make sure atomic pool suits device
  dma-pool: introduce dma_guess_pool()
  dma-pool: get rid of dma_in_atomic_pool()
  dma-direct: provide function to check physical memory area validity
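In outline, the series teaches the atomic-pool allocator to verify that the memory it hands out is actually addressable by the requesting device, and to fall back to a more restrictive pool when it is not. Below is a minimal userspace model of that select-verify-fall-back loop (illustrative only: the struct, the pool base addresses, and the mask are invented for the sketch; the real logic is in the kernel/dma/pool.c hunks that follow):

#include <stdint.h>
#include <stdio.h>

struct pool { const char *name; uint64_t base; };	/* fake "pool" */

static struct pool pool_dma    = { "atomic_pool_dma",    0x00100000ULL };
static struct pool pool_dma32  = { "atomic_pool_dma32",  0x80000000ULL };
static struct pool pool_kernel = { "atomic_pool_kernel", 0x100000000ULL };

/* fall back from a pool whose memory the device could not address */
static struct pool *safer_pool(struct pool *bad)
{
	if (bad == &pool_kernel)
		return &pool_dma32;
	if (bad == &pool_dma32)
		return &pool_dma;
	return NULL;
}

/* the dma_coherent_ok() idea: does [phys, phys + size) fit under the mask? */
static int coherent_ok(uint64_t mask, uint64_t phys, uint64_t size)
{
	return phys + size - 1 <= mask;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* a 32-bit capable device */
	struct pool *pool = &pool_kernel;	/* first guess */

	while (pool && !coherent_ok(mask, pool->base, 4096))
		pool = safer_pool(pool);	/* retry with a safer pool */

	printf("allocated from %s\n", pool ? pool->name : "(no pool fits)");
	return 0;
}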
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -69,6 +69,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 u64 dma_direct_get_required_mask(struct device *dev);
 gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask);
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -70,7 +70,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	return 0;
 }
 
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
 			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
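The function exported above is the crux of the series: the highest bus address the buffer touches must fit under the tighter of the device's coherent_dma_mask and bus_dma_limit. A small standalone illustration of the arithmetic (assuming an identity phys-to-dma mapping, i.e. ignoring phys_to_dma_direct(), and reimplementing min_not_zero() locally):

#include <stdint.h>
#include <assert.h>

/* min_not_zero(), as in the kernel: pick the smaller limit, but treat 0
 * as "no limit set". */
static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

static int coherent_ok(uint64_t coherent_mask, uint64_t bus_limit,
		       uint64_t phys, uint64_t size)
{
	return phys + size - 1 <= min_not_zero(coherent_mask, bus_limit);
}

int main(void)
{
	/* 32-bit device, no extra bus limit: a buffer ending at 4 GiB - 1
	 * is fine, a buffer starting at 4 GiB is not. */
	assert( coherent_ok(0xffffffffULL, 0, 0xfffff000ULL, 0x1000));
	assert(!coherent_ok(0xffffffffULL, 0, 0x100000000ULL, 0x1000));
	return 0;
}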
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -6,7 +6,6 @@
 #include <linux/debugfs.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
 #include <linux/init.h>
 #include <linux/genalloc.h>
 #include <linux/set_memory.h>
@@ -69,12 +68,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 	do {
 		pool_size = 1 << (PAGE_SHIFT + order);
-
-		if (dev_get_cma_area(NULL))
-			page = dma_alloc_from_contiguous(NULL, 1 << order,
-							 order, false);
-		else
-			page = alloc_pages(gfp, order);
+		page = alloc_pages(gfp, order);
 	} while (!page && order-- > 0);
 	if (!page)
 		goto out;
 
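With CMA no longer used for the pools, growth relies on the page allocator's zone guarantees, and the surviving do/while keeps the existing strategy: try the largest block first and halve the order on each failure. A userspace model of that order-shrinking retry (malloc() stands in for alloc_pages(); names and sizes are invented for the sketch):

#include <stdlib.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void *alloc_shrinking(size_t want, size_t *got)
{
	int order = 0;
	void *p = NULL;

	/* smallest order that covers the request */
	while ((size_t)1 << (PAGE_SHIFT + order) < want)
		order++;

	do {
		*got = (size_t)1 << (PAGE_SHIFT + order);
		p = malloc(*got);	/* stand-in for alloc_pages(gfp, order) */
	} while (!p && order-- > 0);	/* halve the block and retry */

	return p;
}

int main(void)
{
	size_t got;
	void *p = alloc_shrinking(1 << 20, &got);

	printf("got %zu bytes\n", p ? got : 0);
	free(p);
	return 0;
}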
@@ -118,8 +112,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 	dma_common_free_remap(addr, pool_size);
 #endif
 free_page: __maybe_unused
-	if (!dma_release_from_contiguous(NULL, page, 1 << order))
-		__free_pages(page, order);
+	__free_pages(page, order);
 out:
 	return ret;
 }
@@ -203,7 +196,7 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dev_to_pool(struct device *dev)
+static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
 {
 	u64 phys_mask;
 	gfp_t gfp;
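Only the renamed helper's declarations are visible in this hunk. For orientation, here is a userspace model of the first-guess idea it implements: map a device's coherent mask to the most permissive pool the device can address. The thresholds below are assumptions for illustration, not code from this diff:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the ZONE_DMA/ZONE_DMA32 boundaries. */
static const char *first_guess(uint64_t coherent_mask)
{
	if (coherent_mask < 0xffffffffULL)	/* can't reach all of 32-bit space */
		return "atomic_pool_dma";
	if (coherent_mask == 0xffffffffULL)	/* 32-bit device */
		return "atomic_pool_dma32";
	return "atomic_pool_kernel";		/* 64-bit capable */
}

int main(void)
{
	printf("%s\n", first_guess(0x00ffffffULL));	/* 24-bit ISA-like */
	printf("%s\n", first_guess(0xffffffffULL));	/* 32-bit PCI-like */
	printf("%s\n", first_guess(~0ULL));		/* full 64-bit */
	return 0;
}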
@@ -217,51 +210,79 @@ static inline struct gen_pool *dev_to_pool(struct device *dev)
 	return atomic_pool_kernel;
 }
 
-static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
+static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
 {
-	struct gen_pool *pool = dev_to_pool(dev);
+	if (bad_pool == atomic_pool_kernel)
+		return atomic_pool_dma32 ? : atomic_pool_dma;
 
-	if (unlikely(!pool))
-		return false;
-	return gen_pool_has_addr(pool, (unsigned long)start, size);
+	if (bad_pool == atomic_pool_dma32)
+		return atomic_pool_dma;
+
+	return NULL;
+}
+
+static inline struct gen_pool *dma_guess_pool(struct device *dev,
+					      struct gen_pool *bad_pool)
+{
+	if (bad_pool)
+		return dma_get_safer_pool(bad_pool);
+
+	return dma_guess_pool_from_device(dev);
 }
 
 void *dma_alloc_from_pool(struct device *dev, size_t size,
 			  struct page **ret_page, gfp_t flags)
 {
-	struct gen_pool *pool = dev_to_pool(dev);
-	unsigned long val;
+	struct gen_pool *pool = NULL;
+	unsigned long val = 0;
 	void *ptr = NULL;
-
-	if (!pool) {
-		WARN(1, "%pGg atomic pool not initialised!\n", &flags);
-		return NULL;
+	phys_addr_t phys;
+
+	while (1) {
+		pool = dma_guess_pool(dev, pool);
+		if (!pool) {
+			WARN(1, "Failed to get suitable pool for %s\n",
+			     dev_name(dev));
+			break;
+		}
+
+		val = gen_pool_alloc(pool, size);
+		if (!val)
+			continue;
+
+		phys = gen_pool_virt_to_phys(pool, val);
+		if (dma_coherent_ok(dev, phys, size))
+			break;
+
+		gen_pool_free(pool, val, size);
+		val = 0;
 	}
 
-	val = gen_pool_alloc(pool, size);
-	if (likely(val)) {
-		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
+	if (val) {
 		*ret_page = pfn_to_page(__phys_to_pfn(phys));
 		ptr = (void *)val;
 		memset(ptr, 0, size);
-	} else {
-		WARN_ONCE(1, "DMA coherent pool depleted, increase size "
-			  "(recommended min coherent_pool=%zuK)\n",
-			  gen_pool_size(pool) >> 9);
-	}
 
-	if (gen_pool_avail(pool) < atomic_pool_size)
-		schedule_work(&atomic_pool_work);
+		if (gen_pool_avail(pool) < atomic_pool_size)
+			schedule_work(&atomic_pool_work);
+	}
 
 	return ptr;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
-	struct gen_pool *pool = dev_to_pool(dev);
+	struct gen_pool *pool = NULL;
 
-	if (!dma_in_atomic_pool(dev, start, size))
-		return false;
-	gen_pool_free(pool, (unsigned long)start, size);
-	return true;
+	while (1) {
+		pool = dma_guess_pool(dev, pool);
+		if (!pool)
+			return false;
+
+		if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
+			gen_pool_free(pool, (unsigned long)start, size);
+			return true;
+		}
+	}
 }
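For context on when this path runs: on kernels built with CONFIG_DMA_COHERENT_POOL, a coherent allocation from non-blocking context is served from these pools. A hypothetical driver fragment showing such a call (grab_atomic_buf() is an invented name, not part of this series):

#include <linux/dma-mapping.h>

/* Hypothetical fragment: a non-blocking coherent allocation like this is
 * what ends up in dma_alloc_from_pool() above, since dma_direct_alloc()
 * cannot remap or sleep in atomic context. */
static void *grab_atomic_buf(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_ATOMIC);
}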