sh: declared coherent memory support V2
This patch adds declared coherent memory support to the sh architecture. All functions are based on the x86 implementation. Header files are adjusted to use the new functions instead of the former consistent_alloc() code. This version includes the few changes that were included in the fix patch, together with modifications based on feedback from Paul.

Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 4862ec0739
commit f93e97eaea
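Not part of the commit, but for orientation: a hypothetical sketch of how a platform driver might use the interface this patch introduces. The probe function name, device addresses and sizes below are made up for illustration; only dma_declare_coherent_memory(), dma_alloc_coherent(), dma_free_coherent() and dma_release_declared_memory() come from the patch itself.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical probe: hand a 64KiB on-chip SRAM window to the DMA API,
 * then carve coherent buffers out of it with the usual calls. */
static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* Declare the region; bus and device addresses are identical here. */
	if (dma_declare_coherent_memory(dev, 0xa8000000, 0xa8000000, 0x10000,
					DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)
	    != DMA_MEMORY_MAP)
		return -ENOMEM;

	/* Allocations are now satisfied from the declared region's bitmap. */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf) {
		dma_release_declared_memory(dev);
		return -ENOMEM;
	}

	/* ... program the device with 'handle', touch 'buf' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	dma_release_declared_memory(dev);
	return 0;
}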
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -3,6 +3,8 @@
  *
  * Copyright (C) 2004 - 2007  Paul Mundt
  *
+ * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -13,66 +15,146 @@
 #include <asm/addrspace.h>
 #include <asm/io.h>
 
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
-{
-	struct page *page, *end, *free;
-	void *ret, *vp;
-	int order;
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
-	split_page(page, order);
-
-	ret = page_address(page);
-	*handle = virt_to_phys(ret);
-
-	vp = ioremap_nocache(*handle, size);
-	if (!vp) {
-		free_pages((unsigned long)ret, order);
-		return NULL;
-	}
-
-	memset(vp, 0, size);
-
-	/*
-	 * We must flush the cache before we pass it on to the device
-	 */
-	dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
-
-	page = virt_to_page(ret);
-	free = page + (size >> PAGE_SHIFT);
-	end = page + (1 << order);
-
-	while (++page < end) {
-		/* Free any unused pages */
-		if (page >= free) {
-			__free_page(page);
-		}
-	}
-
-	return vp;
-}
-EXPORT_SYMBOL(consistent_alloc);
-
-void consistent_free(void *vaddr, size_t size, dma_addr_t dma_handle)
-{
-	struct page *page;
-	unsigned long addr;
-
-	addr = (unsigned long)phys_to_virt((unsigned long)dma_handle);
-	page = virt_to_page(addr);
-
-	free_pages(addr, get_order(size));
-
-	iounmap(vaddr);
-}
-EXPORT_SYMBOL(consistent_free);
-
-void consistent_sync(void *vaddr, size_t size, int direction)
+struct dma_coherent_mem {
+	void		*virt_base;
+	u32		device_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t gfp)
+{
+	void *ret;
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem) {
+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
+						   order);
+		if (page >= 0) {
+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
+			ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(ret, 0, size);
+			return ret;
+		}
+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+			return NULL;
+	}
+
+	ret = (void *)__get_free_pages(gfp, order);
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		/*
+		 * Pages from the page allocator may have data present in
+		 * cache. So flush the cache before using uncached memory.
+		 */
+		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
+		*dma_handle = virt_to_phys(ret);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+	} else {
+		WARN_ON(irqs_disabled());	/* for portability */
+		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
+		free_pages((unsigned long)vaddr, order);
+	}
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	void __iomem *mem_base = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
+
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	mem_base = ioremap_nocache(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dev->dma_mem)
+		goto out;
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem);
+ out:
+	if (mem_base)
+		iounmap(mem_base);
+	return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int pos, err;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction direction)
 {
 #ifdef CONFIG_CPU_SH5
 	void *p1addr = vaddr;
@@ -94,4 +176,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 		BUG();
 	}
 }
-EXPORT_SYMBOL(consistent_sync);
+EXPORT_SYMBOL(dma_cache_sync);
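For reference (again, not part of the commit): the declared-memory branch of dma_alloc_coherent() above turns the page index returned by bitmap_find_free_region() into both a bus handle and a CPU pointer by applying the same fixed page-shift offset to device_base and virt_base. A standalone sketch of that arithmetic, with made-up base addresses and page index:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4KiB pages, as on sh */

int main(void)
{
	unsigned long device_base = 0xa8000000UL;	/* mem->device_base (hypothetical) */
	unsigned long virt_base   = 0xb0000000UL;	/* mem->virt_base, ioremapped (hypothetical) */
	int page = 3;					/* index from bitmap_find_free_region() */

	unsigned long dma_handle = device_base + ((unsigned long)page << PAGE_SHIFT);
	unsigned long vaddr      = virt_base   + ((unsigned long)page << PAGE_SHIFT);

	/* Prints: dma_handle = 0xa8003000, vaddr = 0xb0003000 */
	printf("dma_handle = 0x%lx, vaddr = 0x%lx\n", dma_handle, vaddr);
	return 0;
}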
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -8,11 +8,6 @@
 
 extern struct bus_type pci_bus_type;
 
-/* arch/sh/mm/consistent.c */
-extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
-extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
-extern void consistent_sync(void *vaddr, size_t size, int direction);
-
 #define dma_supported(dev, mask)	(1)
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
@@ -25,44 +20,19 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
-{
-	if (sh_mv.mv_consistent_alloc) {
-		void *ret;
-
-		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
-		if (ret != NULL)
-			return ret;
-	}
-
-	return consistent_alloc(flag, size, dma_handle);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
-{
-	if (sh_mv.mv_consistent_free) {
-		int ret;
-
-		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
-		if (ret == 0)
-			return;
-	}
-
-	consistent_free(vaddr, size, dma_handle);
-}
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction dir);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h) (1)
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	consistent_sync(vaddr, size, (int)dir);
-}
-
 static inline dma_addr_t dma_map_single(struct device *dev,
 					void *ptr, size_t size,
 					enum dma_data_direction dir)
@@ -205,4 +175,18 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
 {
 	return dma_addr == 0;
 }
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
 #endif /* __ASM_SH_DMA_MAPPING_H */