/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm-generic/pci_iomap.h>

#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif

/*****************************************************************************/
/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the simple architectures, we just read/write the
 * memory location directly.
 */
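/*
 * A minimal usage sketch (not part of this header); the device base
 * address, register offset and bit are all hypothetical:
 *
 *	void __iomem *regs = ioremap(0xfeed0000, 0x100);
 *	u32 status;
 *
 *	status = readl(regs + 0x04);
 *	writel(status | 0x1, regs + 0x04);
 *	iounmap(regs);
 *
 * readl()/writel() convert to/from little-endian on top of the __raw_*()
 * accessors below, which access the memory location directly.
 */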
#ifndef __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *) addr;
}
#endif

#ifndef __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *) addr;
}
#endif

#ifndef __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *) addr;
}
#endif

#define readb __raw_readb

#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}

#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}

#ifndef __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	*(volatile u8 __force *) addr = b;
}
#endif

#ifndef __raw_writew
static inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	*(volatile u16 __force *) addr = b;
}
#endif

#ifndef __raw_writel
static inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	*(volatile u32 __force *) addr = b;
}
#endif

#define writeb __raw_writeb
#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)

#ifdef CONFIG_64BIT
#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *) addr;
}
#endif

#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}

#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	*(volatile u64 __force *) addr = b;
}
#endif

#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *) 0)
#endif

/*****************************************************************************/
/*
 * traditional input/output functions
 */

static inline u8 inb(unsigned long addr)
{
	return readb(addr + PCI_IOBASE);
}

static inline u16 inw(unsigned long addr)
{
	return readw(addr + PCI_IOBASE);
}

static inline u32 inl(unsigned long addr)
{
	return readl(addr + PCI_IOBASE);
}

static inline void outb(u8 b, unsigned long addr)
{
	writeb(b, addr + PCI_IOBASE);
}

static inline void outw(u16 b, unsigned long addr)
{
	writew(b, addr + PCI_IOBASE);
}

static inline void outl(u32 b, unsigned long addr)
{
	writel(b, addr + PCI_IOBASE);
}

#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
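/*
 * Usage sketch for the port accessors above; 0x3f8 (the legacy PC UART
 * base) is only an illustration:
 *
 *	u8 lsr = inb(0x3f8 + 5);
 *	if (lsr & 0x20)
 *		outb('A', 0x3f8);
 *
 * In this generic implementation a port number is just an offset from
 * PCI_IOBASE, so inb(port) reduces to readb(port + PCI_IOBASE).
 */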

#ifndef insb
static inline void insb(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u8 *buf = buffer;
		do {
			u8 x = __raw_readb(addr + PCI_IOBASE);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef insw
static inline void insw(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u16 *buf = buffer;
		do {
			u16 x = __raw_readw(addr + PCI_IOBASE);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef insl
static inline void insl(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u32 *buf = buffer;
		do {
			u32 x = __raw_readl(addr + PCI_IOBASE);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef outsb
static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u8 *buf = buffer;
		do {
			__raw_writeb(*buf++, addr + PCI_IOBASE);
		} while (--count);
	}
}
#endif

#ifndef outsw
static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u16 *buf = buffer;
		do {
			__raw_writew(*buf++, addr + PCI_IOBASE);
		} while (--count);
	}
}
#endif

#ifndef outsl
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u32 *buf = buffer;
		do {
			__raw_writel(*buf++, addr + PCI_IOBASE);
		} while (--count);
	}
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr))
#define ioread32(addr) readl(addr)
#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr))

#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr)
#define iowrite32(v, addr) writel((v), (addr))
#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr)

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
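/*
 * Sketch for the big-endian variants above: a device whose registers are
 * big-endian regardless of CPU endianness (regs and the 0x10 offset are
 * hypothetical):
 *
 *	u32 ctrl = ioread32be(regs + 0x10);
 *	iowrite32be(ctrl | 0x1, regs + 0x10);
 */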

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *) (x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 */
#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
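/*
 * Sketch: this pair is only meaningful for directly mapped kernel memory
 * such as a kmalloc()ed buffer, not for vmalloc() or ioremap() mappings:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 * phys_to_virt(pa) then gives back the original pointer.
 */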

/*
 * Change "struct page" to physical address.
 *
 * This implementation is for the no-MMU case only... if you have an MMU
 * you'll need to provide your own definitions.
 */
#ifndef CONFIG_MMU
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return (void __iomem*) (unsigned long)offset;
}

#define __ioremap(offset, size, flags) ioremap(offset, size)

#ifndef ioremap_nocache
#define ioremap_nocache ioremap
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_nocache
#endif

static inline void iounmap(void __iomem *addr)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_HAS_IOPORT
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *) port;
}

static inline void ioport_unmap(void __iomem *p)
{
}
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT */

#define xlate_dev_kmem_ptr(p) p
#define xlate_dev_mem_ptr(p) __va(p)

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(volatile void *address)
{
	return ((unsigned long) address);
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *) address;
}
#endif
#endif

#ifndef memset_io
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
#endif

#ifndef memcpy_fromio
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
#endif
#ifndef memcpy_toio
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
#endif
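/*
 * Sketch for the helpers above (regs, the 0x100 offset and the length are
 * made up): copy a block out of device memory, then clear it.
 *
 *	memcpy_fromio(buf, regs + 0x100, 64);
 *	memset_io(regs + 0x100, 0, 64);
 */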

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */