dma-mapping updates for 5.8, part 1

Merge tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - enhance the dma pool to allow atomic allocation on x86 with AMD SEV
   (David Rientjes)

 - two small cleanups (Jason Yan and Peter Collingbourne)

* tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: fix comment for dma_release_from_contiguous
  dma-pool: scale the default DMA coherent pool size with memory capacity
  x86/mm: unencrypted non-blocking DMA allocations use coherent pools
  dma-pool: add pool sizes to debugfs
  dma-direct: atomic allocations must come from atomic coherent pools
  dma-pool: dynamically expanding atomic pools
  dma-pool: add additional coherent pools to map to gfp mask
  dma-remap: separate DMA atomic pools from direct remap code
  dma-debug: make __dma_entry_alloc_check_leak() static
commit 1ee18de929
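
The practical case this series serves is a driver that needs coherent DMA memory from a context that cannot sleep, on a machine where the buffer must also be made unencrypted (AMD SEV). A minimal driver-style sketch of that allocation pattern, for illustration only: the function names are hypothetical, while dma_alloc_coherent()/dma_free_coherent() and GFP_ATOMIC are the regular DMA API.

#include <linux/dma-mapping.h>

/*
 * Illustrative only: allocate a coherent buffer from a non-blocking
 * context (e.g. a timer or IRQ path). Making memory unencrypted for
 * SEV may block, so with this series the request is satisfied from a
 * pre-decrypted atomic pool instead of sleeping or failing.
 */
static void *example_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
}

static void example_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}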

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1524,6 +1524,7 @@ config X86_CPA_STATISTICS
 config AMD_MEM_ENCRYPT
 	bool "AMD Secure Memory Encryption (SME) support"
 	depends on X86_64 && CPU_SUP_AMD
+	select DMA_COHERENT_POOL
 	select DYNAMIC_PHYSICAL_MASK
 	select ARCH_USE_MEMREMAP_PROT
 	select ARCH_HAS_FORCE_DMA_UNENCRYPTED

--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 
 	/* Non-coherent atomic allocation? Easy */
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(cpu_addr, alloc_size))
+	    dma_free_from_pool(dev, cpu_addr, alloc_size))
 		return;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    !gfpflags_allow_blocking(gfp) && !coherent)
-		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
+					       gfp);
 	else
 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
 	if (!cpu_addr)

--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -67,6 +67,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 }
 
 u64 dma_direct_get_required_mask(struct device *dev);
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+				  u64 *phys_mask);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,

--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -630,9 +630,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 		pgprot_t prot, const void *caller);
 void dma_common_free_remap(void *cpu_addr, size_t size);
 
-bool dma_in_atomic_pool(void *start, size_t size);
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
-bool dma_free_from_pool(void *start, size_t size);
+void *dma_alloc_from_pool(struct device *dev, size_t size,
+			  struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
 
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,

--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -79,10 +79,14 @@ config DMA_REMAP
 	select DMA_NONCOHERENT_MMAP
 	bool
 
-config DMA_DIRECT_REMAP
+config DMA_COHERENT_POOL
 	bool
 	select DMA_REMAP
 
+config DMA_DIRECT_REMAP
+	bool
+	select DMA_COHERENT_POOL
+
 config DMA_CMA
 	bool "DMA Contiguous Memory Allocator"
 	depends on HAVE_DMA_CONTIGUOUS && CMA

--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT)	+= coherent.o
 obj-$(CONFIG_DMA_VIRT_OPS)		+= virt.o
 obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
 obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
+obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o
 obj-$(CONFIG_DMA_REMAP)			+= remap.o

--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -222,8 +222,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
  * @gfp:   Allocation flags.
  *
  * This function allocates contiguous memory buffer for specified device. It
- * first tries to use device specific contiguous memory area if available or
- * the default global one, then tries a fallback allocation of normal pages.
+ * tries to use device specific contiguous memory area if available, or the
+ * default global one.
  *
  * Note that it byapss one-page size of allocations from the global area as
  * the addresses within one page are always contiguous, so there is no need

--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -656,7 +656,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
 	return entry;
 }
 
-void __dma_entry_alloc_check_leak(void)
+static void __dma_entry_alloc_check_leak(void)
 {
 	u32 tmp = nr_total_entries % nr_prealloc_entries;
 

--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -45,7 +45,7 @@ u64 dma_direct_get_required_mask(struct device *dev)
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
-static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_limit)
 {
 	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
@@ -76,6 +76,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+					      unsigned long attrs)
+{
+	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return false;
+	if (gfpflags_allow_blocking(gfp))
+		return false;
+	if (force_dma_unencrypted(dev))
+		return true;
+	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return false;
+	if (dma_alloc_need_uncached(dev, attrs))
+		return true;
+	return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+					     unsigned long attrs)
+{
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return true;
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev))
+		return false;
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return true;
+	return false;
+}
+
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
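
The two helpers above encode the routing policy: a non-blocking allocation goes to an atomic pool when the device needs unencrypted memory, or when an uncached remap would be required. A standalone C model of that decision, with the Kconfig switches and device state reduced to plain booleans (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct model_dev {
	bool force_unencrypted;	/* e.g. a device in an AMD SEV guest */
	bool needs_uncached;	/* arch wants a non-cached mapping */
};

static bool should_alloc_from_pool(bool coherent_pool, bool direct_remap,
				   const struct model_dev *dev, bool can_block)
{
	if (!coherent_pool)
		return false;
	if (can_block)
		return false;		/* blocking path can decrypt/remap inline */
	if (dev->force_unencrypted)
		return true;		/* decryption blocks, so use pre-decrypted pool */
	if (!direct_remap)
		return false;
	return dev->needs_uncached;	/* remapping also blocks */
}

int main(void)
{
	struct model_dev sev = { .force_unencrypted = true };
	struct model_dev plain = { 0 };

	printf("SEV + GFP_ATOMIC   -> pool: %d\n",
	       should_alloc_from_pool(true, true, &sev, false));
	printf("SEV + GFP_KERNEL   -> pool: %d\n",
	       should_alloc_from_pool(true, true, &sev, true));
	printf("plain + GFP_ATOMIC -> pool: %d\n",
	       should_alloc_from_pool(true, true, &plain, false));
	return 0;
}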
@@ -89,7 +122,7 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	/* we always manually zero the memory once we are done: */
 	gfp &= ~__GFP_ZERO;
-	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 			&phys_limit);
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
@@ -125,10 +158,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs) &&
-	    !gfpflags_allow_blocking(gfp)) {
-		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
+		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
 		if (!ret)
 			return NULL;
 		goto done;
@@ -204,6 +235,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, attrs) &&
+	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -211,10 +247,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
-		return;
-
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 

--- /dev/null
+++ b/kernel/dma/pool.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2020 Google LLC
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/init.h>
+#include <linux/genalloc.h>
+#include <linux/set_memory.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+static struct gen_pool *atomic_pool_dma __ro_after_init;
+static unsigned long pool_size_dma;
+static struct gen_pool *atomic_pool_dma32 __ro_after_init;
+static unsigned long pool_size_dma32;
+static struct gen_pool *atomic_pool_kernel __ro_after_init;
+static unsigned long pool_size_kernel;
+
+/* Size can be defined by the coherent_pool command line */
+static size_t atomic_pool_size;
+
+/* Dynamic background expansion when the atomic pool is near capacity */
+static struct work_struct atomic_pool_work;
+
+static int __init early_coherent_pool(char *p)
+{
+	atomic_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void __init dma_atomic_pool_debugfs_init(void)
+{
+	struct dentry *root;
+
+	root = debugfs_create_dir("dma_pools", NULL);
+	if (IS_ERR_OR_NULL(root))
+		return;
+
+	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
+	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
+	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
+}
+
+static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
+{
+	if (gfp & __GFP_DMA)
+		pool_size_dma += size;
+	else if (gfp & __GFP_DMA32)
+		pool_size_dma32 += size;
+	else
+		pool_size_kernel += size;
+}
+
+static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
+			      gfp_t gfp)
+{
+	unsigned int order;
+	struct page *page;
+	void *addr;
+	int ret = -ENOMEM;
+
+	/* Cannot allocate larger than MAX_ORDER-1 */
+	order = min(get_order(pool_size), MAX_ORDER-1);
+
+	do {
+		pool_size = 1 << (PAGE_SHIFT + order);
+
+		if (dev_get_cma_area(NULL))
+			page = dma_alloc_from_contiguous(NULL, 1 << order,
+							 order, false);
+		else
+			page = alloc_pages(gfp, order);
+	} while (!page && order-- > 0);
+	if (!page)
+		goto out;
+
+	arch_dma_prep_coherent(page, pool_size);
+
+#ifdef CONFIG_DMA_DIRECT_REMAP
+	addr = dma_common_contiguous_remap(page, pool_size,
+					   pgprot_dmacoherent(PAGE_KERNEL),
+					   __builtin_return_address(0));
+	if (!addr)
+		goto free_page;
+#else
+	addr = page_to_virt(page);
+#endif
+	/*
+	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
+	 * shrink so no re-encryption occurs in dma_direct_free_pages().
+	 */
+	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
+				   1 << order);
+	if (ret)
+		goto remove_mapping;
+	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
+				pool_size, NUMA_NO_NODE);
+	if (ret)
+		goto encrypt_mapping;
+
+	dma_atomic_pool_size_add(gfp, pool_size);
+	return 0;
+
+encrypt_mapping:
+	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
+				   1 << order);
+	if (WARN_ON_ONCE(ret)) {
+		/* Decrypt succeeded but encrypt failed, purposely leak */
+		goto out;
+	}
+remove_mapping:
+#ifdef CONFIG_DMA_DIRECT_REMAP
+	dma_common_free_remap(addr, pool_size);
+#endif
+free_page: __maybe_unused
+	if (!dma_release_from_contiguous(NULL, page, 1 << order))
+		__free_pages(page, order);
+out:
+	return ret;
+}
+
+static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
+{
+	if (pool && gen_pool_avail(pool) < atomic_pool_size)
+		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
+}
+
+static void atomic_pool_work_fn(struct work_struct *work)
+{
+	if (IS_ENABLED(CONFIG_ZONE_DMA))
+		atomic_pool_resize(atomic_pool_dma,
+				   GFP_KERNEL | GFP_DMA);
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		atomic_pool_resize(atomic_pool_dma32,
+				   GFP_KERNEL | GFP_DMA32);
+	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
+}
+
+static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
+						      gfp_t gfp)
+{
+	struct gen_pool *pool;
+	int ret;
+
+	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
+	if (!pool)
+		return NULL;
+
+	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
+
+	ret = atomic_pool_expand(pool, pool_size, gfp);
+	if (ret) {
+		gen_pool_destroy(pool);
+		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
+		       pool_size >> 10, &gfp);
+		return NULL;
+	}
+
+	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
+		gen_pool_size(pool) >> 10, &gfp);
+	return pool;
+}
+
+static int __init dma_atomic_pool_init(void)
+{
+	int ret = 0;
+
+	/*
+	 * If coherent_pool was not used on the command line, default the pool
+	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
+	 */
+	if (!atomic_pool_size) {
+		atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) *
+					SZ_128K;
+		atomic_pool_size = min_t(size_t, atomic_pool_size,
+					 1 << (PAGE_SHIFT + MAX_ORDER-1));
+	}
+	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
+
+	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+						    GFP_KERNEL);
+	if (!atomic_pool_kernel)
+		ret = -ENOMEM;
+	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
+						GFP_KERNEL | GFP_DMA);
+		if (!atomic_pool_dma)
+			ret = -ENOMEM;
+	}
+	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
+						GFP_KERNEL | GFP_DMA32);
+		if (!atomic_pool_dma32)
+			ret = -ENOMEM;
+	}
+
+	dma_atomic_pool_debugfs_init();
+	return ret;
+}
+postcore_initcall(dma_atomic_pool_init);
+
+static inline struct gen_pool *dev_to_pool(struct device *dev)
+{
+	u64 phys_mask;
+	gfp_t gfp;
+
+	gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+					  &phys_mask);
+	if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+		return atomic_pool_dma;
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
+		return atomic_pool_dma32;
+	return atomic_pool_kernel;
+}
+
+static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
+{
+	struct gen_pool *pool = dev_to_pool(dev);
+
+	if (unlikely(!pool))
+		return false;
+	return gen_pool_has_addr(pool, (unsigned long)start, size);
+}
+
+void *dma_alloc_from_pool(struct device *dev, size_t size,
+			  struct page **ret_page, gfp_t flags)
+{
+	struct gen_pool *pool = dev_to_pool(dev);
+	unsigned long val;
+	void *ptr = NULL;
+
+	if (!pool) {
+		WARN(1, "%pGg atomic pool not initialised!\n", &flags);
+		return NULL;
+	}
+
+	val = gen_pool_alloc(pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
+
+		*ret_page = pfn_to_page(__phys_to_pfn(phys));
+		ptr = (void *)val;
+		memset(ptr, 0, size);
+	}
+	if (gen_pool_avail(pool) < atomic_pool_size)
+		schedule_work(&atomic_pool_work);
+
+	return ptr;
+}
+
+bool dma_free_from_pool(struct device *dev, void *start, size_t size)
+{
+	struct gen_pool *pool = dev_to_pool(dev);
+
+	if (!dma_in_atomic_pool(dev, start, size))
+		return false;
+	gen_pool_free(pool, (unsigned long)start, size);
+	return true;
+}
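
The new file leans on the genalloc allocator: each pool is a gen_pool backed by one or more pre-decrypted regions, and allocations in atomic context only touch the gen_pool accounting, never the page allocator. A minimal kernel-style sketch of that pattern follows, using a plain page-allocator backing and hypothetical example_* names (not part of the patch); the gen_pool_* calls are the real genalloc API.

#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/numa.h>

static struct gen_pool *example_pool;

static int example_pool_init(void)
{
	void *buf;
	int ret;

	/* page-granular pool, no NUMA affinity */
	example_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!example_pool)
		return -ENOMEM;

	buf = (void *)__get_free_pages(GFP_KERNEL, 2);	/* 4 pages of backing */
	if (!buf) {
		gen_pool_destroy(example_pool);
		return -ENOMEM;
	}

	/* hand the backing region to the pool, with its physical address */
	ret = gen_pool_add_virt(example_pool, (unsigned long)buf,
				virt_to_phys(buf), 4 * PAGE_SIZE, NUMA_NO_NODE);
	if (ret) {
		free_pages((unsigned long)buf, 2);
		gen_pool_destroy(example_pool);
		return ret;
	}
	return 0;
}

static void *example_pool_alloc(size_t size)
{
	/* carves from the pre-allocated region; no page allocator involved */
	return (void *)gen_pool_alloc(example_pool, size);
}

static void example_pool_free(void *addr, size_t size)
{
	gen_pool_free(example_pool, (unsigned long)addr, size);
}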

--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -1,13 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (c) 2014 The Linux Foundation
  */
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
-#include <linux/init.h>
-#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -73,117 +68,3 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
 	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
 	vunmap(cpu_addr);
 }
-
-#ifdef CONFIG_DMA_DIRECT_REMAP
-static struct gen_pool *atomic_pool __ro_after_init;
-
-#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
-
-static int __init early_coherent_pool(char *p)
-{
-	atomic_pool_size = memparse(p, &p);
-	return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
-static gfp_t dma_atomic_pool_gfp(void)
-{
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		return GFP_DMA;
-	if (IS_ENABLED(CONFIG_ZONE_DMA32))
-		return GFP_DMA32;
-	return GFP_KERNEL;
-}
-
-static int __init dma_atomic_pool_init(void)
-{
-	unsigned int pool_size_order = get_order(atomic_pool_size);
-	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
-	struct page *page;
-	void *addr;
-	int ret;
-
-	if (dev_get_cma_area(NULL))
-		page = dma_alloc_from_contiguous(NULL, nr_pages,
-						 pool_size_order, false);
-	else
-		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
-	if (!page)
-		goto out;
-
-	arch_dma_prep_coherent(page, atomic_pool_size);
-
-	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-	if (!atomic_pool)
-		goto free_page;
-
-	addr = dma_common_contiguous_remap(page, atomic_pool_size,
-					   pgprot_dmacoherent(PAGE_KERNEL),
-					   __builtin_return_address(0));
-	if (!addr)
-		goto destroy_genpool;
-
-	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
-				page_to_phys(page), atomic_pool_size, -1);
-	if (ret)
-		goto remove_mapping;
-	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
-	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
-		atomic_pool_size / 1024);
-	return 0;
-
-remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size);
-destroy_genpool:
-	gen_pool_destroy(atomic_pool);
-	atomic_pool = NULL;
-free_page:
-	if (!dma_release_from_contiguous(NULL, page, nr_pages))
-		__free_pages(page, pool_size_order);
-out:
-	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
-	       atomic_pool_size / 1024);
-	return -ENOMEM;
-}
-postcore_initcall(dma_atomic_pool_init);
-
-bool dma_in_atomic_pool(void *start, size_t size)
-{
-	if (unlikely(!atomic_pool))
-		return false;
-
-	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
-}
-
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-{
-	unsigned long val;
-	void *ptr = NULL;
-
-	if (!atomic_pool) {
-		WARN(1, "coherent pool not initialised!\n");
-		return NULL;
-	}
-
-	val = gen_pool_alloc(atomic_pool, size);
-	if (val) {
-		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
-
-		*ret_page = pfn_to_page(__phys_to_pfn(phys));
-		ptr = (void *)val;
-		memset(ptr, 0, size);
-	}
-
-	return ptr;
-}
-
-bool dma_free_from_pool(void *start, size_t size)
-{
-	if (!dma_in_atomic_pool(start, size))
-		return false;
-	gen_pool_free(atomic_pool, (unsigned long)start, size);
-	return true;
-}
-#endif /* CONFIG_DMA_DIRECT_REMAP */