kernel_optimize_test/arch/sparc/include/asm/dma-mapping.h
FUJITA Tomonori 349004294c dma-mapping: sparc: unify 32bit and 64bit dma_set_mask
This patchset converts the PCI DMA API to the generic device model.
This is one of the reasons why we introduced the generic DMA API long
ago: driver writers can use the generic DMA API with any bus instead
of bus-specific DMA APIs such as pci_map_single, sbus_map_single, etc.
(only two bus-specific APIs remain now: pci and ssb).
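
For illustration (placeholder names pdev, buf and len, not taken from any
particular driver), a PCI driver that used to map a buffer with the
bus-specific call:

	dma_addr_t busaddr;

	busaddr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

can pass the underlying struct device to the generic API instead:

	busaddr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);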

Some of the PCI DMA API functions are already implemented on top of the
generic DMA API (include/asm-generic/pci-dma-compat.h).  But there are some
exceptions.  This patchset finishes the transformation.
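
These compat wrappers are thin; pci-dma-compat.h implements pci_map_single()
along roughly these lines (a sketch, not a verbatim copy of the header):

	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
	{
		/* a NULL pci_dev maps to a NULL struct device */
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
				      ptr, size, (enum dma_data_direction)direction);
	}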

This patch:

sparc has two dma_set_mask implementations, one for 32-bit and one for
64-bit.  They are the same except for the error value returned.  We can
safely unify them since the exact error value doesn't matter as long as it
is negative (as DMA-API.txt describes).
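
Callers only test whether dma_set_mask() failed, so the precise negative
value never matters; a typical probe-time pattern (illustrative, pdev is a
placeholder) looks like:

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* no 64-bit DMA, fall back to a 32-bit mask */
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;
	}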

This patch also changes dma_set_mask not to call pci_set_dma_mask.
Instead, dma_set_mask does the same thing that pci_set_dma_mask does.
This change enables us to change pci_set_dma_mask to call dma_set_mask;
we can then implement pci_set_dma_mask as pci-dma-compat.h does.
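
With dma_set_mask() self-contained, pci_set_dma_mask() can become a trivial
wrapper in the same style as the other pci-dma-compat.h helpers (a sketch of
the intended shape, not the final diff):

	static inline int
	pci_set_dma_mask(struct pci_dev *dev, u64 mask)
	{
		return dma_set_mask(&dev->dev, mask);
	}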

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: David Miller <davem@davemloft.net>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: James Bottomley <James.Bottomley@suse.de>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Greg KH <greg@kroah.com>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-03-12 15:52:42 -08:00

#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* sparc32 PCI devices use their own dma_map_ops; everything
	 * else goes through the global dma_ops. */
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
	if (dev->bus == &pci_bus_type)
		return &pci32_dma_ops;
#endif
	return dma_ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

/* Unified 32-bit/64-bit dma_set_mask; any negative return signals failure. */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EINVAL;
		*dev->dma_mask = mask;
		return 0;
	}
#endif
	return -EINVAL;
}

#endif /* ___ASM_SPARC_DMA_MAPPING_H */