Merge branch 'from-linus' into upstream

John W. Linville 2006-09-11 14:08:41 -04:00
commit 623b3e1d64
204 changed files with 4071 additions and 1915 deletions


@ -120,6 +120,13 @@ Who: Adrian Bunk <bunk@stusta.de>
---------------------------
What: drivers depending on OSS_OBSOLETE_DRIVER
When: options in 2.6.20, code in 2.6.22
Why: OSS drivers with ALSA replacements
Who: Adrian Bunk <bunk@stusta.de>
---------------------------
What: pci_module_init(driver)
When: January 2007
Why: Is replaced by pci_register_driver(pci_driver).
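The entry above deprecates pci_module_init() in favour of pci_register_driver(). A minimal sketch of the migration follows; the driver name, device IDs and callbacks are hypothetical placeholders, not anything from this tree.

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name     = "example",
	.id_table = example_ids,
	.probe    = example_probe,
	.remove   = example_remove,
};

static int __init example_init(void)
{
	/* was: return pci_module_init(&example_driver); */
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");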


@ -1183,6 +1183,8 @@ running once the system is up.
Mechanism 2.
nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI
Configuration
mmconf [IA-32,X86_64] Force MMCONFIG. This is useful
to override the builtin blacklist.
nomsi [MSI] If the PCI_MSI kernel config parameter is
enabled, this kernel boot option can be used to
disable the use of MSI interrupts system-wide.


@ -1136,10 +1136,10 @@ Sense and level information should be encoded as follows:
Devices connected to openPIC-compatible controllers should encode
sense and polarity as follows:
0 = high to low edge sensitive type enabled
0 = low to high edge sensitive type enabled
1 = active low level sensitive type enabled
2 = low to high edge sensitive type enabled
3 = active high level sensitive type enabled
2 = active high level sensitive type enabled
3 = high to low edge sensitive type enabled
ISA PIC interrupt controllers should adhere to the ISA PIC
encodings listed below:
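As a reading aid for the corrected table above, here is a small self-contained decoder that mirrors the new openPIC sense/polarity encoding; it is illustrative only, not part of this commit.

#include <stdio.h>

static const char *openpic_sense_name(unsigned int sense)
{
	switch (sense) {
	case 0: return "low to high (rising) edge sensitive";
	case 1: return "active low level sensitive";
	case 2: return "active high level sensitive";
	case 3: return "high to low (falling) edge sensitive";
	default: return "invalid";
	}
}

int main(void)
{
	unsigned int s;

	for (s = 0; s < 4; s++)
		printf("%u = %s\n", s, openpic_sense_name(s));
	return 0;
}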


@ -3308,10 +3308,11 @@ S: Maintained
XFS FILESYSTEM
P: Silicon Graphics Inc
P: Tim Shimmin, David Chatterton
M: xfs-masters@oss.sgi.com
M: nathans@sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
T: git git://oss.sgi.com:8090/xfs/xfs-2.6
S: Supported
X86 3-LEVEL PAGING (PAE) SUPPORT


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 18
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME=Crazed Snow-Weasel
# *DOCUMENTATION*


@ -621,9 +621,8 @@ CONFIG_AT91_WATCHDOG=y
# USB-based Watchdog Cards
#
# CONFIG_USBPCWATCHDOG is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_NVRAM is not set
CONFIG_RTC=y
# CONFIG_AT91_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
@ -956,9 +955,41 @@ CONFIG_USB_AT91=y
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
CONFIG_MMC_BLOCK=y
# CONFIG_MMC_WBSD is not set
CONFIG_MMC_AT91RM9200=y
#
# Real Time Clock
#
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
CONFIG_RTC_HCTOSYS_DEVICE="rtc1"
#
# RTC interfaces
#
# CONFIG_RTC_INTF_SYSFS is not set
CONFIG_RTC_INTF_PROC=y
CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
#
# RTC drivers
#
# CONFIG_RTC_DRV_X1205 is not set
CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_ISL1208 is not set
# CONFIG_RTC_DRV_DS1672 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_RS5C372 is not set
# CONFIG_RTC_DRV_M48T86 is not set
CONFIG_RTC_DRV_AT91=y
# CONFIG_RTC_DRV_TEST is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# File systems
#
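The RTC options enabled above bring in the generic RTC class and its character-device interface (CONFIG_RTC_INTF_DEV). The userspace sketch below shows what that interface exposes; the device path is an assumption (this config points hctosys at rtc1, so /dev/rtc1 may be the node of interest on the board).

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* path is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {	/* read current RTC time */
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	printf("rtc: %04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}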


@ -60,7 +60,7 @@ static void __iomem *dma_base;
static kmem_cache_t *dma_kmem;
/* dma channel state information */
s3c2410_dma_chan_t s3c2410_chans[S3C2410_DMA_CHANNELS];
struct s3c2410_dma_chan s3c2410_chans[S3C2410_DMA_CHANNELS];
/* debugging functions */
@ -74,7 +74,7 @@ s3c2410_dma_chan_t s3c2410_chans[S3C2410_DMA_CHANNELS];
#define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg))
#else
static inline void
dma_wrreg(s3c2410_dma_chan_t *chan, int reg, unsigned long val)
dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val)
{
pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg);
writel(val, dma_regaddr(chan, reg));
@ -102,7 +102,7 @@ struct s3c2410_dma_regstate {
*/
static void
dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs)
dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs)
{
regs->dcsrc = dma_rdreg(chan, S3C2410_DMA_DCSRC);
regs->disrc = dma_rdreg(chan, S3C2410_DMA_DISRC);
@ -112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs)
}
static void
dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan,
struct s3c2410_dma_regstate *regs)
{
printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
@ -122,7 +122,7 @@ dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
}
static void
dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan)
dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan)
{
struct s3c2410_dma_regstate state;
@ -136,7 +136,7 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan)
}
static void
dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan)
dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan)
{
struct s3c2410_dma_regstate state;
@ -164,7 +164,7 @@ dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan)
*/
static void
s3c2410_dma_stats_timeout(s3c2410_dma_stats_t *stats, int val)
s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val)
{
if (stats == NULL)
return;
@ -183,7 +183,7 @@ s3c2410_dma_stats_timeout(s3c2410_dma_stats_t *stats, int val)
*/
static int
s3c2410_dma_waitforload(s3c2410_dma_chan_t *chan, int line)
s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line)
{
int timeout = chan->load_timeout;
int took;
@ -230,8 +230,8 @@ s3c2410_dma_waitforload(s3c2410_dma_chan_t *chan, int line)
*/
static inline int
s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan,
s3c2410_dma_buf_t *buf)
s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan,
struct s3c2410_dma_buf *buf)
{
unsigned long reload;
@ -304,7 +304,7 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan,
*/
static void
s3c2410_dma_call_op(s3c2410_dma_chan_t *chan, s3c2410_chan_op_t op)
s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op)
{
if (chan->op_fn != NULL) {
(chan->op_fn)(chan, op);
@ -318,8 +318,8 @@ s3c2410_dma_call_op(s3c2410_dma_chan_t *chan, s3c2410_chan_op_t op)
*/
static inline void
s3c2410_dma_buffdone(s3c2410_dma_chan_t *chan, s3c2410_dma_buf_t *buf,
s3c2410_dma_buffresult_t result)
s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf,
enum s3c2410_dma_buffresult result)
{
pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n",
chan->callback_fn, buf, buf->id, buf->size, result);
@ -334,7 +334,7 @@ s3c2410_dma_buffdone(s3c2410_dma_chan_t *chan, s3c2410_dma_buf_t *buf,
* start a dma channel going
*/
static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
{
unsigned long tmp;
unsigned long flags;
@ -430,7 +430,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
*/
static int
s3c2410_dma_canload(s3c2410_dma_chan_t *chan)
s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
{
if (chan->load_state == S3C2410_DMALOAD_NONE ||
chan->load_state == S3C2410_DMALOAD_1RUNNING)
@ -460,8 +460,8 @@ s3c2410_dma_canload(s3c2410_dma_chan_t *chan)
int s3c2410_dma_enqueue(unsigned int channel, void *id,
dma_addr_t data, int size)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
s3c2410_dma_buf_t *buf;
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
struct s3c2410_dma_buf *buf;
unsigned long flags;
check_channel(channel);
@ -540,7 +540,7 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
EXPORT_SYMBOL(s3c2410_dma_enqueue);
static inline void
s3c2410_dma_freebuf(s3c2410_dma_buf_t *buf)
s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf)
{
int magicok = (buf->magic == BUF_MAGIC);
@ -560,7 +560,7 @@ s3c2410_dma_freebuf(s3c2410_dma_buf_t *buf)
*/
static inline void
s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan)
s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan)
{
pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n",
chan->number, chan->load_state);
@ -601,8 +601,8 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan)
static irqreturn_t
s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
{
s3c2410_dma_chan_t *chan = (s3c2410_dma_chan_t *)devpw;
s3c2410_dma_buf_t *buf;
struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
struct s3c2410_dma_buf *buf;
buf = chan->curr;
@ -731,10 +731,10 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
* get control of an dma channel
*/
int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
int s3c2410_dma_request(unsigned int channel, struct s3c2410_dma_client *client,
void *dev)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
unsigned long flags;
int err;
@ -807,9 +807,9 @@ EXPORT_SYMBOL(s3c2410_dma_request);
* allowed to go through.
*/
int s3c2410_dma_free(dmach_t channel, s3c2410_dma_client_t *client)
int s3c2410_dma_free(dmach_t channel, struct s3c2410_dma_client *client)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
unsigned long flags;
check_channel(channel);
@ -846,7 +846,7 @@ int s3c2410_dma_free(dmach_t channel, s3c2410_dma_client_t *client)
EXPORT_SYMBOL(s3c2410_dma_free);
static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan)
{
unsigned long tmp;
unsigned long flags;
@ -880,7 +880,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
return 0;
}
void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan)
void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan)
{
unsigned long tmp;
unsigned int timeout = 0x10000;
@ -901,9 +901,9 @@ void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan)
* stop the channel, and remove all current and pending transfers
*/
static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
{
s3c2410_dma_buf_t *buf, *next;
struct s3c2410_dma_buf *buf, *next;
unsigned long flags;
pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
@ -958,7 +958,7 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
}
int
s3c2410_dma_started(s3c2410_dma_chan_t *chan)
s3c2410_dma_started(struct s3c2410_dma_chan *chan)
{
unsigned long flags;
@ -995,9 +995,9 @@ s3c2410_dma_started(s3c2410_dma_chan_t *chan)
}
int
s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
s3c2410_dma_ctrl(dmach_t channel, enum s3c2410_chan_op op)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
check_channel(channel);
@ -1046,7 +1046,7 @@ int s3c2410_dma_config(dmach_t channel,
int xferunit,
int dcon)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n",
__FUNCTION__, channel, xferunit, dcon);
@ -1086,7 +1086,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
int s3c2410_dma_setflags(dmach_t channel, unsigned int flags)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
check_channel(channel);
@ -1106,7 +1106,7 @@ EXPORT_SYMBOL(s3c2410_dma_setflags);
int s3c2410_dma_set_opfn(dmach_t channel, s3c2410_dma_opfn_t rtn)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
check_channel(channel);
@ -1121,7 +1121,7 @@ EXPORT_SYMBOL(s3c2410_dma_set_opfn);
int s3c2410_dma_set_buffdone_fn(dmach_t channel, s3c2410_dma_cbfn_t rtn)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
check_channel(channel);
@ -1149,11 +1149,11 @@ EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
*/
int s3c2410_dma_devconfig(int channel,
s3c2410_dmasrc_t source,
enum s3c2410_dmasrc source,
int hwcfg,
unsigned long devaddr)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
check_channel(channel);
@ -1200,7 +1200,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
int s3c2410_dma_getposition(dmach_t channel, dma_addr_t *src, dma_addr_t *dst)
{
s3c2410_dma_chan_t *chan = &s3c2410_chans[channel];
struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
check_channel(channel);
@ -1222,7 +1222,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state)
{
s3c2410_dma_chan_t *cp = container_of(dev, s3c2410_dma_chan_t, dev);
struct s3c2410_dma_chan *cp = container_of(dev, struct s3c2410_dma_chan, dev);
printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
@ -1262,7 +1262,7 @@ static struct sysdev_class dma_sysclass = {
static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
{
memset(p, 0, sizeof(s3c2410_dma_buf_t));
memset(p, 0, sizeof(struct s3c2410_dma_buf));
}
@ -1270,7 +1270,7 @@ static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
static int __init s3c2410_init_dma(void)
{
s3c2410_dma_chan_t *cp;
struct s3c2410_dma_chan *cp;
int channel;
int ret;
@ -1288,7 +1288,7 @@ static int __init s3c2410_init_dma(void)
goto err;
}
dma_kmem = kmem_cache_create("dma_desc", sizeof(s3c2410_dma_buf_t), 0,
dma_kmem = kmem_cache_create("dma_desc", sizeof(struct s3c2410_dma_buf), 0,
SLAB_HWCACHE_ALIGN,
s3c2410_dma_cache_ctor, NULL);
@ -1301,7 +1301,7 @@ static int __init s3c2410_init_dma(void)
for (channel = 0; channel < S3C2410_DMA_CHANNELS; channel++) {
cp = &s3c2410_chans[channel];
memset(cp, 0, sizeof(s3c2410_dma_chan_t));
memset(cp, 0, sizeof(struct s3c2410_dma_chan));
/* dma channel irqs are in order.. */
cp->number = channel;
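The edits above are a mechanical application of the kernel style rule against typedef'ing plain structs. A generic before/after sketch, with illustrative names and fields rather than the real s3c24xx definitions:

/* before: a typedef hides the aggregate behind an opaque-looking name */
typedef struct example_chan_s {
	int number;
} example_chan_t;

static int old_channel_number(example_chan_t *chan)
{
	return chan->number;
}

/* after: the struct tag is spelled out wherever the type is used */
struct example_chan {
	int number;
};

static int new_channel_number(struct example_chan *chan)
{
	return chan->number;
}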


@ -87,6 +87,32 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
if (cache_is_vipt_aliasing())
flush_pfn_alias(pfn, user_addr);
}
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
if (cache_is_vivt()) {
if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
return;
}
if (cache_is_vipt_aliasing()) {
flush_pfn_alias(page_to_pfn(page), uaddr);
return;
}
/* VIPT non-aliasing cache */
if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
vma->vm_flags | VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
/* only flushing the kernel mapping on non-aliasing VIPT */
__cpuc_coherent_kern_range(addr, addr + len);
}
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif


@ -156,7 +156,7 @@ struct vfp_single {
};
extern s32 vfp_get_float(unsigned int reg);
extern void vfp_put_float(unsigned int reg, s32 val);
extern void vfp_put_float(s32 val, unsigned int reg);
/*
* VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa
@ -267,7 +267,7 @@ struct vfp_double {
*/
#define VFP_REG_ZERO 16
extern u64 vfp_get_double(unsigned int reg);
extern void vfp_put_double(unsigned int reg, u64 val);
extern void vfp_put_double(u64 val, unsigned int reg);
#define VFP_DOUBLE_MANTISSA_BITS (52)
#define VFP_DOUBLE_EXPONENT_BITS (11)
@ -341,12 +341,6 @@ static inline int vfp_double_type(struct vfp_double *s)
u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func);
/*
* System registers
*/
extern u32 vfp_get_sys(unsigned int reg);
extern void vfp_put_sys(unsigned int reg, u32 val);
u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand);
/*


@ -195,7 +195,7 @@ u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exce
s64 d = vfp_double_pack(vd);
pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func,
dd, d, exceptions);
vfp_put_double(dd, d);
vfp_put_double(d, dd);
}
return exceptions;
}
@ -250,19 +250,19 @@ vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
*/
static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr)
{
vfp_put_double(dd, vfp_double_packed_abs(vfp_get_double(dm)));
vfp_put_double(vfp_double_packed_abs(vfp_get_double(dm)), dd);
return 0;
}
static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr)
{
vfp_put_double(dd, vfp_get_double(dm));
vfp_put_double(vfp_get_double(dm), dd);
return 0;
}
static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr)
{
vfp_put_double(dd, vfp_double_packed_negate(vfp_get_double(dm)));
vfp_put_double(vfp_double_packed_negate(vfp_get_double(dm)), dd);
return 0;
}
@ -287,7 +287,7 @@ static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr)
vdp = &vfp_double_default_qnan;
ret = FPSCR_IOC;
}
vfp_put_double(dd, vfp_double_pack(vdp));
vfp_put_double(vfp_double_pack(vdp), dd);
return ret;
}
@ -476,7 +476,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts");
pack_nan:
vfp_put_float(sd, vfp_single_pack(&vsd));
vfp_put_float(vfp_single_pack(&vsd), sd);
return exceptions;
}
@ -573,7 +573,7 @@ static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr)
pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float(sd, d);
vfp_put_float(d, sd);
return exceptions;
}
@ -648,7 +648,7 @@ static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr)
pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float(sd, (s32)d);
vfp_put_float((s32)d, sd);
return exceptions;
}
@ -1084,7 +1084,7 @@ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
vdn_nan:
exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
pack:
vfp_put_double(dd, vfp_double_pack(&vdd));
vfp_put_double(vfp_double_pack(&vdd), dd);
return exceptions;
vdm_nan:
@ -1104,7 +1104,7 @@ static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
goto pack;
invalid:
vfp_put_double(dd, vfp_double_pack(&vfp_double_default_qnan));
vfp_put_double(vfp_double_pack(&vfp_double_default_qnan), dd);
return FPSCR_IOC;
}


@ -178,12 +178,12 @@ vfp_get_float:
.globl vfp_put_float
vfp_put_float:
add pc, pc, r0, lsl #3
add pc, pc, r1, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
mcr p10, 0, r1, c\dr, c0, 0 @ fmsr r0, s0
mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
mov pc, lr
mcr p10, 0, r1, c\dr, c0, 4 @ fmsr r0, s1
mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
mov pc, lr
.endr
@ -203,9 +203,9 @@ vfp_get_double:
.globl vfp_put_double
vfp_put_double:
add pc, pc, r0, lsl #3
add pc, pc, r2, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
fmdrr d\dr, r1, r2
fmdrr d\dr, r0, r1
mov pc, lr
.endr
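The prototype changes above, together with the caller updates in vfpsingle.c and vfpdouble.c, swap the argument order so that the value being stored already sits in the registers the store instructions use (r0 for floats, r0:r1 for doubles) and the destination register index lands in the register the computed branch indexes on (r1 or r2). A tiny call-site sketch, not taken from the commit:

#include <linux/types.h>

extern void vfp_put_float(s32 val, unsigned int reg);
extern void vfp_put_double(u64 val, unsigned int reg);

static void store_results(s32 sval, u64 dval, unsigned int sd, unsigned int dd)
{
	/* old order: vfp_put_float(sd, sval); vfp_put_double(dd, dval); */
	vfp_put_float(sval, sd);
	vfp_put_double(dval, dd);
}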


@ -200,7 +200,7 @@ u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exce
s32 d = vfp_single_pack(vs);
pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func,
sd, d, exceptions);
vfp_put_float(sd, d);
vfp_put_float(d, sd);
}
return exceptions;
@ -257,19 +257,19 @@ vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
*/
static u32 vfp_single_fabs(int sd, int unused, s32 m, u32 fpscr)
{
vfp_put_float(sd, vfp_single_packed_abs(m));
vfp_put_float(vfp_single_packed_abs(m), sd);
return 0;
}
static u32 vfp_single_fcpy(int sd, int unused, s32 m, u32 fpscr)
{
vfp_put_float(sd, m);
vfp_put_float(m, sd);
return 0;
}
static u32 vfp_single_fneg(int sd, int unused, s32 m, u32 fpscr)
{
vfp_put_float(sd, vfp_single_packed_negate(m));
vfp_put_float(vfp_single_packed_negate(m), sd);
return 0;
}
@ -333,7 +333,7 @@ static u32 vfp_single_fsqrt(int sd, int unused, s32 m, u32 fpscr)
vsp = &vfp_single_default_qnan;
ret = FPSCR_IOC;
}
vfp_put_float(sd, vfp_single_pack(vsp));
vfp_put_float(vfp_single_pack(vsp), sd);
return ret;
}
@ -517,7 +517,7 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd");
pack_nan:
vfp_put_double(dd, vfp_double_pack(&vdd));
vfp_put_double(vfp_double_pack(&vdd), dd);
return exceptions;
}
@ -613,7 +613,7 @@ static u32 vfp_single_ftoui(int sd, int unused, s32 m, u32 fpscr)
pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float(sd, d);
vfp_put_float(d, sd);
return exceptions;
}
@ -692,7 +692,7 @@ static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr)
pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float(sd, (s32)d);
vfp_put_float((s32)d, sd);
return exceptions;
}
@ -1127,7 +1127,7 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
vsn_nan:
exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
pack:
vfp_put_float(sd, vfp_single_pack(&vsd));
vfp_put_float(vfp_single_pack(&vsd), sd);
return exceptions;
vsm_nan:
@ -1147,7 +1147,7 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
goto pack;
invalid:
vfp_put_float(sd, vfp_single_pack(&vfp_single_default_qnan));
vfp_put_float(vfp_single_pack(&vfp_single_default_qnan), sd);
return FPSCR_IOC;
}


@ -29,6 +29,10 @@ config GENERIC_HARDIRQS
bool
default n
config GENERIC_TIME
bool
default y
config TIME_LOW_RES
bool
default y


@ -32,8 +32,6 @@
#define TICK_SIZE (tick_nsec / 1000)
extern unsigned long wall_jiffies;
unsigned long __nongprelbss __clkin_clock_speed_HZ;
unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
unsigned long __nongprelbss __res_bus_clock_speed_HZ;
@ -144,85 +142,6 @@ void time_init(void)
time_divisor_init();
}
/*
* This version of gettimeofday has near microsecond resolution.
*/
void do_gettimeofday(struct timeval *tv)
{
unsigned long seq;
unsigned long usec, sec;
unsigned long max_ntp_tick;
do {
unsigned long lost;
seq = read_seqbegin(&xtime_lock);
usec = 0;
lost = jiffies - wall_jiffies;
/*
* If time_adjust is negative then NTP is slowing the clock
* so make sure not to go into next possible interval.
* Better to lose some accuracy than have time go backwards..
*/
if (unlikely(time_adjust < 0)) {
max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
usec = min(usec, max_ntp_tick);
if (lost)
usec += lost * max_ntp_tick;
}
else if (unlikely(lost))
usec += lost * (USEC_PER_SEC / HZ);
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry(&xtime_lock, seq));
while (usec >= 1000000) {
usec -= 1000000;
sec++;
}
tv->tv_sec = sec;
tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);
int do_settimeofday(struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
* wall time. Discover what correction gettimeofday() would have
* made, and then undo it!
*/
nsec -= 0 * NSEC_PER_USEC;
nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/*
* Scheduler clock - returns current time in nanosec units.
*/
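Selecting GENERIC_TIME, as the Kconfig hunk above does, lets the generic timekeeping code provide do_gettimeofday() and do_settimeofday(), which is why the arch-specific versions are deleted here; the architecture instead registers a clocksource. Below is a minimal sketch in the style of the 2.6.18-era clocksource API (the same API the i386 HPET clocksource elsewhere in this merge uses); the counter read, rating and frequency are placeholders, not frv code.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/clocksource.h>

static cycle_t example_read(void)
{
	return 0;	/* would return a free-running hardware counter */
}

static struct clocksource clocksource_example = {
	.name	= "example",
	.rating	= 200,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
};

static int __init example_clocksource_init(void)
{
	/* mult converts counter ticks to (nanoseconds << shift) */
	clocksource_example.mult =
		clocksource_hz2mult(1000000, clocksource_example.shift);
	return clocksource_register(&clocksource_example);
}
module_init(example_clocksource_init);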


@ -317,20 +317,14 @@ is386: movl $2,%ecx # set MP
movl %eax,%gs
lldt %ax
cld # gcc2 wants the direction flag cleared at all times
pushl %eax # fake return address
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready
cmpb $0,%cl
je 1f # the first CPU calls start_kernel
# all other CPUs call initialize_secondary
call initialize_secondary
jmp L6
1:
cmpb $0,%cl # the first CPU calls start_kernel
jne initialize_secondary # all other CPUs call initialize_secondary
#endif /* CONFIG_SMP */
call start_kernel
L6:
jmp L6 # main should never return here, but
# just in case, we know what happens.
jmp start_kernel
/*
* We depend on ET to be correct. This checks for 287/387.


@ -35,7 +35,7 @@ static int __init init_hpet_clocksource(void)
void __iomem* hpet_base;
u64 tmp;
if (!hpet_address)
if (!is_hpet_enabled())
return -ENODEV;
/* calculate the hpet address: */


@ -82,10 +82,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
}
#endif
if (!irq_desc[irq].handle_irq) {
__do_IRQ(irq, regs);
goto out_exit;
}
#ifdef CONFIG_4KSTACKS
curctx = (union irq_ctx *) current_thread_info();
@ -125,7 +121,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
#endif
__do_IRQ(irq, regs);
out_exit:
irq_exit();
return 1;


@ -956,38 +956,6 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
return 0;
}
/*
* This function checks if the entire range <start,end> is mapped with type.
*
* Note: this function only works correct if the e820 table is sorted and
* not-overlapping, which is the case
*/
int __init
e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
{
u64 start = s;
u64 end = e;
int i;
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
if (type && ei->type != type)
continue;
/* is the region (part) in overlap with the current region ?*/
if (ei->addr >= end || ei->addr + ei->size <= start)
continue;
/* if the region is at the beginning of <start,end> we move
* start to the end of the region since it's ok until there
*/
if (ei->addr <= start)
start = ei->addr + ei->size;
/* if start is now at or beyond end, we're done, full
* coverage */
if (start >= end)
return 1; /* we're done */
}
return 0;
}
/*
* Find the highest page frame number we have available
*/


@ -92,7 +92,11 @@ asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
#else
#define call_trace (-1)
#endif
ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb)
@ -187,22 +191,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (unwind_init_blocked(&info, task) == 0)
unw_ret = show_trace_unwind(&info, log_lvl);
}
if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
#ifdef CONFIG_STACK_UNWIND
print_symbol("DWARF2 unwinder stuck at %s\n",
UNW_PC(&info));
if (call_trace == 1) {
printk("Leftover inexact backtrace:\n");
if (UNW_SP(&info))
if (unw_ret > 0) {
if (call_trace == 1 && !arch_unw_user_mode(&info)) {
print_symbol("DWARF2 unwinder stuck at %s\n",
UNW_PC(&info));
if (UNW_SP(&info) >= PAGE_OFFSET) {
printk("Leftover inexact backtrace:\n");
stack = (void *)UNW_SP(&info);
} else if (call_trace > 1)
} else
printk("Full inexact backtrace again:\n");
} else if (call_trace >= 1)
return;
else
printk("Full inexact backtrace again:\n");
#else
} else
printk("Inexact backtrace:\n");
#endif
}
}
if (task == current) {
@ -1241,6 +1244,7 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
#ifdef CONFIG_STACK_UNWIND
static int __init call_trace_setup(char *s)
{
if (strcmp(s, "old") == 0)
@ -1254,3 +1258,4 @@ static int __init call_trace_setup(char *s)
return 1;
}
__setup("call_trace=", call_trace_setup);
#endif


@ -237,6 +237,11 @@ char * __devinit pcibios_setup(char *str)
pci_probe &= ~PCI_PROBE_MMCONF;
return NULL;
}
/* override DMI blacklist */
else if (!strcmp(str, "mmconf")) {
pci_probe |= PCI_PROBE_MMCONF_FORCE;
return NULL;
}
#endif
else if (!strcmp(str, "noacpi")) {
acpi_noirq_set();


@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/e820.h>
#include "pci.h"
@ -187,9 +188,31 @@ static __init void unreachable_devices(void)
}
}
static int disable_mcfg(struct dmi_system_id *d)
{
printk("PCI: %s detected. Disabling MCFG.\n", d->ident);
pci_probe &= ~PCI_PROBE_MMCONF;
return 0;
}
static struct dmi_system_id __initdata dmi_bad_mcfg[] = {
/* Has broken MCFG table that makes the system hang when used */
{
.callback = disable_mcfg,
.ident = "Intel D3C5105 SDV",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "Intel"),
DMI_MATCH(DMI_BOARD_NAME, "D26928"),
},
},
{}
};
void __init pci_mmcfg_init(void)
{
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
dmi_check_system(dmi_bad_mcfg);
if ((pci_probe & (PCI_PROBE_MMCONF_FORCE|PCI_PROBE_MMCONF)) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
@ -198,15 +221,6 @@ void __init pci_mmcfg_init(void)
(pci_mmcfg_config[0].base_address == 0))
return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
printk(KERN_INFO "PCI: Using MMCONFIG\n");
raw_pci_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;


@ -16,7 +16,8 @@
#define PCI_PROBE_CONF1 0x0002
#define PCI_PROBE_CONF2 0x0004
#define PCI_PROBE_MMCONF 0x0008
#define PCI_PROBE_MASK 0x000f
#define PCI_PROBE_MMCONF_FORCE 0x0010
#define PCI_PROBE_MASK 0x00ff
#define PCI_NO_SORT 0x0100
#define PCI_BIOS_SORT 0x0200


@ -258,7 +258,7 @@ config NR_CPUS
int "Maximum number of CPUs (2-1024)"
range 2 1024
depends on SMP
default "64"
default "1024"
help
You should set this to the number of CPUs in your system, but
keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
@ -354,7 +354,7 @@ config NUMA
config NODES_SHIFT
int "Max num nodes shift(3-10)"
range 3 10
default "8"
default "10"
depends on NEED_MULTIPLE_NODES
help
This option specifies the maximum number of nodes in your SSI system.


@ -1605,8 +1605,8 @@ sys_call_table:
data8 sys_ni_syscall // 1295 reserved for ppoll
data8 sys_unshare
data8 sys_splice
data8 sys_set_robust_list
data8 sys_get_robust_list
data8 sys_ni_syscall // reserved for set_robust_list
data8 sys_ni_syscall // reserved for get_robust_list
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice


@ -197,6 +197,11 @@ start_ap:
;;
srlz.i
;;
{
flushrs // must be first insn in group
srlz.i
}
;;
/*
* Save the region registers, predicate before they get clobbered
*/


@ -4936,13 +4936,15 @@ sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
if (likely(ctx)) {
DPRINT(("context unlocked\n"));
UNPROTECT_CTX(ctx, flags);
fput(file);
}
/* copy argument back to user, if needed */
if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
error_args:
if (file)
fput(file);
kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));


@ -163,10 +163,25 @@ sys_pipe (void)
return retval;
}
int ia64_mmap_check(unsigned long addr, unsigned long len,
unsigned long flags)
{
unsigned long roff;
/*
* Don't permit mappings into unmapped space, the virtual page table
* of a region, or across a region boundary. Note: RGN_MAP_LIMIT is
* equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
*/
roff = REGION_OFFSET(addr);
if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
return -EINVAL;
return 0;
}
static inline unsigned long
do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
{
unsigned long roff;
struct file *file = NULL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
@ -188,17 +203,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
goto out;
}
/*
* Don't permit mappings into unmapped space, the virtual page table of a region,
* or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
* (for some integer n <= 61) and len > 0.
*/
roff = REGION_OFFSET(addr);
if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
addr = -EINVAL;
goto out;
}
down_write(&current->mm->mmap_sem);
addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&current->mm->mmap_sem);


@ -67,10 +67,8 @@ static int __init topology_init(void)
#endif
sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
if (!sysfs_cpus) {
err = -ENOMEM;
goto out;
}
if (!sysfs_cpus)
panic("kzalloc in topology_init failed - NR_CPUS too big?");
for_each_present_cpu(i) {
if((err = arch_register_cpu(i)))


@ -565,7 +565,7 @@ static void __init sn_init_pdas(char **cmdline_p)
* Also sets up a few fields in the nodepda. Also known as
* platform_cpu_init() by the ia64 machvec code.
*/
void __init sn_cpu_init(void)
void __cpuinit sn_cpu_init(void)
{
int cpuid;
int cpuphyid;


@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
return part->reason;
}
bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
(u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
(BTE_NORMAL | BTE_WACQUIRE), NULL);
if (bte_ret == BTE_SUCCESS) {
return xpcSuccess;
}


@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base);
}
@ -1212,24 +1214,20 @@ xpc_init(void)
partid_t partid;
struct xpc_partition *part;
pid_t pid;
size_t buf_size;
if (!ia64_platform_is("sn2")) {
return -ENODEV;
}
/*
* xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
* various portions of a partition's reserved page. Its size is based
* on the size of the reserved page header and part_nasids mask. So we
* need to ensure that the other items will fit as well.
*/
if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
return -EPERM;
}
DBUG_ON((u64) xpc_remote_copy_buffer !=
L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
buf_size = max(XPC_RP_VARS_SIZE,
XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
GFP_KERNEL, &xpc_remote_copy_buffer_base);
if (xpc_remote_copy_buffer == NULL)
return -ENOMEM;
snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
@ -1293,6 +1291,8 @@ xpc_init(void)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
@ -1311,6 +1311,8 @@ xpc_init(void)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}
@ -1362,6 +1364,8 @@ xpc_init(void)
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
kfree(xpc_remote_copy_buffer_base);
return -EBUSY;
}


@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
* Generic buffer used to store a local copy of portions of a remote
* partition's reserved page (either its header and part_nasids mask,
* or its vars).
*
* xpc_discovery runs only once and is a seperate thread that is
* very likely going to be processing in parallel with receiving
* interrupts.
*/
char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
XP_NASID_MASK_BYTES];
char *xpc_remote_copy_buffer;
void *xpc_remote_copy_buffer_base;
/*
* Guarantee that the kmalloc'd memory is cacheline aligned.
*/
static void *
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
/* see if kmalloc will give us cachline aligned memory by default */
@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
bte_res = xp_bte_copy(rp_pa, buf, buf_len,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bte_res != BTE_SUCCESS) {
dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
@ -447,7 +443,7 @@ xpc_check_remote_hb(void)
/* pull the remote_hb cache line */
bres = xp_bte_copy(part->remote_vars_pa,
ia64_tpa((u64) remote_vars),
(u64) remote_vars,
XPC_RP_VARS_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
/* pull over the reserved page header and part_nasids mask */
bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
return xpcVarsNotSet;
}
/* pull over the cross partition variables */
bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
XPC_RP_VARS_SIZE,
bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
return xpc_map_bte_errors(bres);
@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
return xpc_map_bte_errors(bte_res);
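The buffer xpc_init() now uses comes from xpc_kmalloc_cacheline_aligned(), whose approach is hinted at by the comment at the top of its definition: try a plain kmalloc first, and only over-allocate and round up when the result is not already cacheline aligned, remembering the raw pointer in *base so it can be kfree'd later. A generic sketch of that technique, as an illustration rather than the exact SGI implementation:

#include <linux/slab.h>
#include <linux/cache.h>

static void *kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	*base = kmalloc(size, flags);
	if (*base == NULL)
		return NULL;
	if (((unsigned long)*base & (L1_CACHE_BYTES - 1)) == 0)
		return *base;		/* kmalloc already gave aligned memory */

	kfree(*base);
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;
	return (void *)L1_CACHE_ALIGN((unsigned long)*base);
}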


@ -354,6 +354,7 @@ endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM && PPC64
bool "IBM pSeries & new (POWER5-based) iSeries"
select MPIC
select PPC_I8259
select PPC_RTAS
select RTAS_ERROR_LOGGING
@ -363,6 +364,7 @@ config PPC_PSERIES
config PPC_CHRP
bool "Common Hardware Reference Platform (CHRP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
select MPIC
select PPC_I8259
select PPC_INDIRECT_PCI
select PPC_RTAS
@ -373,6 +375,7 @@ config PPC_CHRP
config PPC_PMAC
bool "Apple PowerMac based machines"
depends on PPC_MULTIPLATFORM
select MPIC
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
default y
@ -380,6 +383,7 @@ config PPC_PMAC
config PPC_PMAC64
bool
depends on PPC_PMAC && POWER4
select MPIC
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
@ -389,6 +393,7 @@ config PPC_PMAC64
config PPC_PREP
bool "PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select MPIC
select PPC_I8259
select PPC_INDIRECT_PCI
select PPC_UDBG_16550
@ -397,6 +402,7 @@ config PPC_PREP
config PPC_MAPLE
depends on PPC_MULTIPLATFORM && PPC64
bool "Maple 970FX Evaluation Board"
select MPIC
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
@ -439,12 +445,6 @@ config U3_DART
depends on PPC_MULTIPLATFORM && PPC64
default n
config MPIC
depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE || PPC_CHRP \
|| MPC7448HPC2
bool
default y
config PPC_RTAS
bool
default n
@ -812,6 +812,14 @@ config GENERIC_ISA_DMA
depends on PPC64 || POWER4 || 6xx && !CPM2
default y
config MPIC
bool
default n
config MPIC_WEIRD
bool
default n
config PPC_I8259
bool
default n


@ -0,0 +1,190 @@
/*
* MPC7448HPC2 (Taiga) board Device Tree Source
*
* Copyright 2006 Freescale Semiconductor Inc.
* 2006 Roy Zang <Roy Zang at freescale.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/ {
model = "mpc7448hpc2";
compatible = "mpc74xx";
#address-cells = <1>;
#size-cells = <1>;
linux,phandle = <100>;
cpus {
#cpus = <1>;
#address-cells = <1>;
#size-cells =<0>;
linux,phandle = <200>;
PowerPC,7448@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <20>; // 32 bytes
i-cache-line-size = <20>; // 32 bytes
d-cache-size = <8000>; // L1, 32K bytes
i-cache-size = <8000>; // L1, 32K bytes
timebase-frequency = <0>; // 33 MHz, from uboot
clock-frequency = <0>; // From U-Boot
bus-frequency = <0>; // From U-Boot
32-bit;
linux,phandle = <201>;
linux,boot-cpu;
};
};
memory {
device_type = "memory";
linux,phandle = <300>;
reg = <00000000 20000000 // DDR2 512M at 0
>;
};
tsi108@c0000000 {
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "tsi-bridge";
ranges = <00000000 c0000000 00010000>;
reg = <c0000000 00010000>;
bus-frequency = <0>;
i2c@7000 {
interrupt-parent = <7400>;
interrupts = <E 0>;
reg = <7000 400>;
device_type = "i2c";
compatible = "tsi-i2c";
};
mdio@6000 {
device_type = "mdio";
compatible = "tsi-ethernet";
ethernet-phy@6000 {
linux,phandle = <6000>;
interrupt-parent = <7400>;
interrupts = <2 1>;
reg = <6000 50>;
phy-id = <8>;
device_type = "ethernet-phy";
};
ethernet-phy@6400 {
linux,phandle = <6400>;
interrupt-parent = <7400>;
interrupts = <2 1>;
reg = <6000 50>;
phy-id = <9>;
device_type = "ethernet-phy";
};
};
ethernet@6200 {
#size-cells = <0>;
device_type = "network";
model = "TSI-ETH";
compatible = "tsi-ethernet";
reg = <6000 200>;
address = [ 00 06 D2 00 00 01 ];
interrupts = <10 2>;
interrupt-parent = <7400>;
phy-handle = <6000>;
};
ethernet@6600 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSI-ETH";
compatible = "tsi-ethernet";
reg = <6400 200>;
address = [ 00 06 D2 00 00 02 ];
interrupts = <11 2>;
interrupt-parent = <7400>;
phy-handle = <6400>;
};
serial@7808 {
device_type = "serial";
compatible = "ns16550";
reg = <7808 200>;
clock-frequency = <3f6b5a00>;
interrupts = <c 0>;
interrupt-parent = <7400>;
};
serial@7c08 {
device_type = "serial";
compatible = "ns16550";
reg = <7c08 200>;
clock-frequency = <3f6b5a00>;
interrupts = <d 0>;
interrupt-parent = <7400>;
};
pic@7400 {
linux,phandle = <7400>;
clock-frequency = <0>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <7400 400>;
built-in;
compatible = "chrp,open-pic";
device_type = "open-pic";
big-endian;
};
pci@1000 {
compatible = "tsi10x";
device_type = "pci";
linux,phandle = <1000>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <1000 1000>;
bus-range = <0 0>;
ranges = <02000000 0 e0000000 e0000000 0 1A000000
01000000 0 00000000 fa000000 0 00010000>;
clock-frequency = <7f28154>;
interrupt-parent = <7400>;
interrupts = <17 2>;
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x11 */
0800 0 0 1 7400 24 0
0800 0 0 2 7400 25 0
0800 0 0 3 7400 26 0
0800 0 0 4 7400 27 0
/* IDSEL 0x12 */
1000 0 0 1 7400 25 0
1000 0 0 2 7400 26 0
1000 0 0 3 7400 27 0
1000 0 0 4 7400 24 0
/* IDSEL 0x13 */
1800 0 0 1 7400 26 0
1800 0 0 2 7400 27 0
1800 0 0 3 7400 24 0
1800 0 0 4 7400 25 0
/* IDSEL 0x14 */
2000 0 0 1 7400 27 0
2000 0 0 2 7400 24 0
2000 0 0 3 7400 25 0
2000 0 0 4 7400 26 0
>;
};
};
};
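For context on how a driver consumes nodes like the ns16550 entries in the tree above, here is a hedged sketch that pulls clock-frequency out of a "serial" node. The helper spellings follow the later consolidated of_* API; the 2.6.18-era powerpc code used arch-specific equivalents with slightly different names.

#include <linux/of.h>

static unsigned int example_serial_clock(void)
{
	struct device_node *np;
	const u32 *freq;
	unsigned int hz = 0;

	np = of_find_node_by_type(NULL, "serial");
	if (np) {
		freq = of_get_property(np, "clock-frequency", NULL);
		if (freq)
			hz = *freq;	/* cells are big-endian, as is the CPU */
		of_node_put(np);
	}
	return hz;
}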


@ -0,0 +1,328 @@
/*
* MPC8349E MDS Device Tree Source
*
* Copyright 2005, 2006 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/ {
model = "MPC8349EMDS";
compatible = "MPC834xMDS";
#address-cells = <1>;
#size-cells = <1>;
cpus {
#cpus = <1>;
#address-cells = <1>;
#size-cells = <0>;
PowerPC,8349@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <20>; // 32 bytes
i-cache-line-size = <20>; // 32 bytes
d-cache-size = <8000>; // L1, 32K
i-cache-size = <8000>; // L1, 32K
timebase-frequency = <0>; // from bootloader
bus-frequency = <0>; // from bootloader
clock-frequency = <0>; // from bootloader
32-bit;
};
};
memory {
device_type = "memory";
reg = <00000000 10000000>; // 256MB at 0
};
soc8349@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
ranges = <0 e0000000 00100000>;
reg = <e0000000 00000200>;
bus-frequency = <0>;
wdt@200 {
device_type = "watchdog";
compatible = "mpc83xx_wdt";
reg = <200 100>;
};
i2c@3000 {
device_type = "i2c";
compatible = "fsl-i2c";
reg = <3000 100>;
interrupts = <e 8>;
interrupt-parent = <700>;
dfsrr;
};
i2c@3100 {
device_type = "i2c";
compatible = "fsl-i2c";
reg = <3100 100>;
interrupts = <f 8>;
interrupt-parent = <700>;
dfsrr;
};
spi@7000 {
device_type = "spi";
compatible = "mpc83xx_spi";
reg = <7000 1000>;
interrupts = <10 8>;
interrupt-parent = <700>;
mode = <0>;
};
/* phy type (ULPI or SERIAL) are the only types supported for MPH */
/* port = 0 or 1 */
usb@22000 {
device_type = "usb";
compatible = "fsl-usb2-mph";
reg = <22000 1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <700>;
interrupts = <27 2>;
phy_type = "ulpi";
port1;
};
/* phy type (ULPI, UTMI, UTMI_WIDE, SERIAL) */
usb@23000 {
device_type = "usb";
compatible = "fsl-usb2-dr";
reg = <23000 1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <700>;
interrupts = <26 2>;
phy_type = "ulpi";
};
mdio@24520 {
device_type = "mdio";
compatible = "gianfar";
reg = <24520 20>;
#address-cells = <1>;
#size-cells = <0>;
linux,phandle = <24520>;
ethernet-phy@0 {
linux,phandle = <2452000>;
interrupt-parent = <700>;
interrupts = <11 2>;
reg = <0>;
device_type = "ethernet-phy";
};
ethernet-phy@1 {
linux,phandle = <2452001>;
interrupt-parent = <700>;
interrupts = <12 2>;
reg = <1>;
device_type = "ethernet-phy";
};
};
ethernet@24000 {
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
address = [ 00 00 00 00 00 00 ];
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <20 8 21 8 22 8>;
interrupt-parent = <700>;
phy-handle = <2452000>;
};
ethernet@25000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <25000 1000>;
address = [ 00 00 00 00 00 00 ];
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <23 8 24 8 25 8>;
interrupt-parent = <700>;
phy-handle = <2452001>;
};
serial@4500 {
device_type = "serial";
compatible = "ns16550";
reg = <4500 100>;
clock-frequency = <0>;
interrupts = <9 8>;
interrupt-parent = <700>;
};
serial@4600 {
device_type = "serial";
compatible = "ns16550";
reg = <4600 100>;
clock-frequency = <0>;
interrupts = <a 8>;
interrupt-parent = <700>;
};
pci@8500 {
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x11 */
8800 0 0 1 700 14 8
8800 0 0 2 700 15 8
8800 0 0 3 700 16 8
8800 0 0 4 700 17 8
/* IDSEL 0x12 */
9000 0 0 1 700 16 8
9000 0 0 2 700 17 8
9000 0 0 3 700 14 8
9000 0 0 4 700 15 8
/* IDSEL 0x13 */
9800 0 0 1 700 17 8
9800 0 0 2 700 14 8
9800 0 0 3 700 15 8
9800 0 0 4 700 16 8
/* IDSEL 0x15 */
a800 0 0 1 700 14 8
a800 0 0 2 700 15 8
a800 0 0 3 700 16 8
a800 0 0 4 700 17 8
/* IDSEL 0x16 */
b000 0 0 1 700 17 8
b000 0 0 2 700 14 8
b000 0 0 3 700 15 8
b000 0 0 4 700 16 8
/* IDSEL 0x17 */
b800 0 0 1 700 16 8
b800 0 0 2 700 17 8
b800 0 0 3 700 14 8
b800 0 0 4 700 15 8
/* IDSEL 0x18 */
b000 0 0 1 700 15 8
b000 0 0 2 700 16 8
b000 0 0 3 700 17 8
b000 0 0 4 700 14 8>;
interrupt-parent = <700>;
interrupts = <42 8>;
bus-range = <0 0>;
ranges = <02000000 0 a0000000 a0000000 0 10000000
42000000 0 80000000 80000000 0 10000000
01000000 0 00000000 e2000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
compatible = "83xx";
device_type = "pci";
};
pci@8600 {
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x11 */
8800 0 0 1 700 14 8
8800 0 0 2 700 15 8
8800 0 0 3 700 16 8
8800 0 0 4 700 17 8
/* IDSEL 0x12 */
9000 0 0 1 700 16 8
9000 0 0 2 700 17 8
9000 0 0 3 700 14 8
9000 0 0 4 700 15 8
/* IDSEL 0x13 */
9800 0 0 1 700 17 8
9800 0 0 2 700 14 8
9800 0 0 3 700 15 8
9800 0 0 4 700 16 8
/* IDSEL 0x15 */
a800 0 0 1 700 14 8
a800 0 0 2 700 15 8
a800 0 0 3 700 16 8
a800 0 0 4 700 17 8
/* IDSEL 0x16 */
b000 0 0 1 700 17 8
b000 0 0 2 700 14 8
b000 0 0 3 700 15 8
b000 0 0 4 700 16 8
/* IDSEL 0x17 */
b800 0 0 1 700 16 8
b800 0 0 2 700 17 8
b800 0 0 3 700 14 8
b800 0 0 4 700 15 8
/* IDSEL 0x18 */
b000 0 0 1 700 15 8
b000 0 0 2 700 16 8
b000 0 0 3 700 17 8
b000 0 0 4 700 14 8>;
interrupt-parent = <700>;
interrupts = <42 8>;
bus-range = <0 0>;
ranges = <02000000 0 b0000000 b0000000 0 10000000
42000000 0 90000000 90000000 0 10000000
01000000 0 00000000 e2100000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8600 100>;
compatible = "83xx";
device_type = "pci";
};
/* May need to remove if on a part without crypto engine */
crypto@30000 {
device_type = "crypto";
model = "SEC2";
compatible = "talitos";
reg = <30000 10000>;
interrupts = <b 8>;
interrupt-parent = <700>;
num-channels = <4>;
channel-fifo-len = <18>;
exec-units-mask = <0000007e>;
/* desc mask is for rev2.0,
* we need runtime fixup for >2.0 */
descriptor-types-mask = <01010ebf>;
};
/* IPIC
* interrupts cell = <intr #, sense>
* sense values match linux IORESOURCE_IRQ_* defines:
* sense == 8: Level, low assertion
* sense == 2: Edge, high-to-low change
*/
pic@700 {
linux,phandle = <700>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <700 100>;
built-in;
device_type = "ipic";
};
};
};


@ -2,6 +2,11 @@
* FPU support code, moved here from head.S so that it can be used
* by chips which use other head-whatever.S files.
*
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
* Copyright (C) 1996 Paul Mackerras.
* Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version


@ -322,7 +322,8 @@ EXPORT_SYMBOL(do_softirq);
static LIST_HEAD(irq_hosts);
static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
static unsigned int irq_radix_writer;
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
@ -455,6 +456,58 @@ void irq_set_virq_count(unsigned int count)
irq_virq_count = count;
}
/* radix tree not lockless safe ! we use a brlock-type mechanism
* for now, until we can use a lockless radix tree
*/
static void irq_radix_wrlock(unsigned long *flags)
{
unsigned int cpu, ok;
spin_lock_irqsave(&irq_big_lock, *flags);
irq_radix_writer = 1;
smp_mb();
do {
barrier();
ok = 1;
for_each_possible_cpu(cpu) {
if (per_cpu(irq_radix_reader, cpu)) {
ok = 0;
break;
}
}
if (!ok)
cpu_relax();
} while(!ok);
}
static void irq_radix_wrunlock(unsigned long flags)
{
smp_wmb();
irq_radix_writer = 0;
spin_unlock_irqrestore(&irq_big_lock, flags);
}
static void irq_radix_rdlock(unsigned long *flags)
{
local_irq_save(*flags);
__get_cpu_var(irq_radix_reader) = 1;
smp_mb();
if (likely(irq_radix_writer == 0))
return;
__get_cpu_var(irq_radix_reader) = 0;
smp_wmb();
spin_lock(&irq_big_lock);
__get_cpu_var(irq_radix_reader) = 1;
spin_unlock(&irq_big_lock);
}
static void irq_radix_rdunlock(unsigned long flags)
{
__get_cpu_var(irq_radix_reader) = 0;
local_irq_restore(flags);
}
unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq)
{
@ -604,13 +657,9 @@ void irq_dispose_mapping(unsigned int virq)
/* Check if radix tree allocated yet */
if (host->revmap_data.tree.gfp_mask == 0)
break;
/* XXX radix tree not safe ! remove lock whem it becomes safe
* and use some RCU sync to make sure everything is ok before we
* can re-use that map entry
*/
spin_lock_irqsave(&irq_big_lock, flags);
irq_radix_wrlock(&flags);
radix_tree_delete(&host->revmap_data.tree, hwirq);
spin_unlock_irqrestore(&irq_big_lock, flags);
irq_radix_wrunlock(flags);
break;
}
@ -677,25 +726,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
if (tree->gfp_mask == 0)
return irq_find_mapping(host, hwirq);
/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
* when that is fixed (when Nick's patch gets in
*/
spin_lock_irqsave(&irq_big_lock, flags);
/* Now try to resolve */
irq_radix_rdlock(&flags);
ptr = radix_tree_lookup(tree, hwirq);
irq_radix_rdunlock(flags);
/* Found it, return */
if (ptr) {
virq = ptr - irq_map;
goto bail;
return virq;
}
/* If not there, try to insert it */
virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ)
if (virq != NO_IRQ) {
irq_radix_wrlock(&flags);
radix_tree_insert(tree, hwirq, &irq_map[virq]);
bail:
spin_unlock_irqrestore(&irq_big_lock, flags);
irq_radix_wrunlock(flags);
}
return virq;
}
@ -806,12 +854,12 @@ static int irq_late_init(void)
struct irq_host *h;
unsigned long flags;
spin_lock_irqsave(&irq_big_lock, flags);
irq_radix_wrlock(&flags);
list_for_each_entry(h, &irq_hosts, link) {
if (h->revmap_type == IRQ_HOST_MAP_TREE)
INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
}
spin_unlock_irqrestore(&irq_big_lock, flags);
irq_radix_wrunlock(flags);
return 0;
}
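The rdlock/wrlock helpers added above implement a brlock-style scheme: a reader only sets a per-CPU flag on the fast path, a writer takes the spinlock and then spins until every CPU's reader flag is clear, and a reader that races with an active writer backs off and resynchronises through the spinlock. The sketch below restates that pattern with neutral names, purely as a reading aid rather than as the irq code itself.

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_SPINLOCK(big_lock);
static DEFINE_PER_CPU(unsigned int, reader_active);
static unsigned int writer_active;

static void example_read_lock(unsigned long *flags)
{
	local_irq_save(*flags);
	__get_cpu_var(reader_active) = 1;
	smp_mb();
	if (likely(writer_active == 0))
		return;
	/* writer in progress: drop the flag and synchronise on the lock */
	__get_cpu_var(reader_active) = 0;
	smp_wmb();
	spin_lock(&big_lock);
	__get_cpu_var(reader_active) = 1;
	spin_unlock(&big_lock);
}

static void example_read_unlock(unsigned long flags)
{
	__get_cpu_var(reader_active) = 0;
	local_irq_restore(flags);
}

static void example_write_lock(unsigned long *flags)
{
	unsigned int cpu, busy;

	spin_lock_irqsave(&big_lock, *flags);
	writer_active = 1;
	smp_mb();
	do {					/* wait for all readers to drain */
		busy = 0;
		for_each_possible_cpu(cpu)
			if (per_cpu(reader_active, cpu))
				busy = 1;
		if (busy)
			cpu_relax();
	} while (busy);
}

static void example_write_unlock(unsigned long flags)
{
	smp_wmb();
	writer_active = 0;
	spin_unlock_irqrestore(&big_lock, flags);
}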


@ -1289,6 +1289,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
DBG("Try to map irq for %s...\n", pci_name(pci_dev));
#ifdef DEBUG
memset(&oirq, 0xff, sizeof(oirq));
#endif
/* Try to get a mapping from the device-tree */
if (of_irq_map_pci(pci_dev, &oirq)) {
u8 line, pin;
@ -1314,8 +1317,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
if (virq != NO_IRQ)
set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.controller->full_name);
DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
oirq.controller->full_name);
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@ -1324,6 +1328,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
DBG(" -> failed to map !\n");
return -1;
}
DBG(" -> mapped to linux irq %d\n", virq);
pci_dev->irq = virq;
pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);


@ -126,10 +126,6 @@ EXPORT_SYMBOL(pci_bus_mem_base_phys);
EXPORT_SYMBOL(pci_bus_to_hose);
#endif /* CONFIG_PCI */
#ifdef CONFIG_NOT_COHERENT_CACHE
EXPORT_SYMBOL(flush_dcache_all);
#endif
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(kernel_thread);


@ -646,13 +646,13 @@ static unsigned char ibm_architecture_vec[] = {
5 - 1, /* 5 option vectors */
/* option vector 1: processor architectures supported */
3 - 1, /* length */
3 - 2, /* length */
0, /* don't ignore, don't halt */
OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
OV1_PPC_2_04 | OV1_PPC_2_05,
/* option vector 2: Open Firmware options supported */
34 - 1, /* length */
34 - 2, /* length */
OV2_REAL_MODE,
0, 0,
W(0xffffffff), /* real_base */
@ -666,16 +666,16 @@ static unsigned char ibm_architecture_vec[] = {
48, /* max log_2(hash table size) */
/* option vector 3: processor options supported */
3 - 1, /* length */
3 - 2, /* length */
0, /* don't ignore, don't halt */
OV3_FP | OV3_VMX,
/* option vector 4: IBM PAPR implementation */
2 - 1, /* length */
2 - 2, /* length */
0, /* don't halt */
/* option vector 5: PAPR/OF options */
3 - 1, /* length */
3 - 2, /* length */
0, /* don't ignore, don't halt */
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
};


@ -639,14 +639,17 @@ void of_irq_map_init(unsigned int flags)
}
int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
struct of_irq *out_irq)
int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize,
u32 *addr, struct of_irq *out_irq)
{
struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
u32 *tmp, *imap, *imask;
u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
int imaplen, match, i;
DBG("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
parent->full_name, intspec[0], intspec[1], ointsize);
ipar = of_node_get(parent);
/* First get the #interrupt-cells property of the current cursor
@ -670,6 +673,9 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
if (ointsize != intsize)
return -EINVAL;
/* Look for this #address-cells. We have to implement the old linux
* trick of looking for the parent here as some device-trees rely on it
*/
@ -875,12 +881,15 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
}
intsize = *tmp;
DBG(" intsize=%d intlen=%d\n", intsize, intlen);
/* Check index */
if ((index + 1) * intsize > intlen)
return -EINVAL;
/* Get new specifier and map it */
res = of_irq_map_raw(p, intspec + index * intsize, addr, out_irq);
res = of_irq_map_raw(p, intspec + index * intsize, intsize,
addr, out_irq);
of_node_put(p);
return res;
}
@ -965,7 +974,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
laddr[0] = (pdev->bus->number << 16)
| (pdev->devfn << 8);
laddr[1] = laddr[2] = 0;
return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
}
EXPORT_SYMBOL_GPL(of_irq_map_pci);
#endif /* CONFIG_PCI */


@ -45,8 +45,9 @@ void __devinit smp_generic_take_timebase(void)
{
int cmd;
u64 tb;
unsigned long flags;
local_irq_disable();
local_irq_save(flags);
while (!running)
barrier();
rmb();
@ -70,7 +71,7 @@ void __devinit smp_generic_take_timebase(void)
set_tb(tb >> 32, tb & 0xfffffffful);
enter_contest(tbsync->mark, -1);
}
local_irq_enable();
local_irq_restore(flags);
}
static int __devinit start_contest(int cmd, long offset, int num)
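The switch above from local_irq_disable()/local_irq_enable() to the save/restore pair matters when a function may be entered with interrupts already disabled: local_irq_restore() puts the interrupt state back exactly as it found it instead of unconditionally re-enabling. A minimal illustration with a hypothetical helper:

#include <linux/irqflags.h>

static void touch_shared_state(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* remember the current IRQ state */
	/* ... critical section ... */
	local_irq_restore(flags);	/* restore it, whatever it was */
}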


@ -125,15 +125,8 @@ static long timezone_offset;
unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;
u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;
/*
* Note that on ppc32 this only stores the bottom 32 bits of
* the timebase value, but that's enough to tell when a jiffy
* has passed.
*/
DEFINE_PER_CPU(unsigned long, last_jiffy);
static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
@ -458,7 +451,7 @@ void do_gettimeofday(struct timeval *tv)
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
sec = xtime.tv_sec;
nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
usec = nsec / 1000;
while (usec >= 1000000) {
@ -700,7 +693,6 @@ void timer_interrupt(struct pt_regs * regs)
tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
tb_last_jiffy = tb_next_jiffy;
tb_last_stamp = per_cpu(last_jiffy, cpu);
do_timer(regs);
timer_recalc_offset(tb_last_jiffy);
timer_check_rtc();
@ -749,7 +741,7 @@ void __init smp_space_timers(unsigned int max_cpus)
int i;
unsigned long half = tb_ticks_per_jiffy / 2;
unsigned long offset = tb_ticks_per_jiffy / max_cpus;
unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
previous_tb -= tb_ticks_per_jiffy;
@ -830,7 +822,7 @@ int do_settimeofday(struct timespec *tv)
* and therefore the (jiffies - wall_jiffies) computation
* has been removed.
*/
tb_delta = tb_ticks_since(tb_last_stamp);
tb_delta = tb_ticks_since(tb_last_jiffy);
tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
@ -950,8 +942,7 @@ void __init time_init(void)
if (__USE_RTC()) {
/* 601 processor: dec counts down by 128 every 128ns */
ppc_tb_freq = 1000000000;
tb_last_stamp = get_rtcl();
tb_last_jiffy = tb_last_stamp;
tb_last_jiffy = get_rtcl();
} else {
/* Normal PowerPC with timebase register */
ppc_md.calibrate_decr();
@ -959,7 +950,7 @@ void __init time_init(void)
ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
tb_last_stamp = tb_last_jiffy = get_tb();
tb_last_jiffy = get_tb();
}
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
@ -1036,7 +1027,7 @@ void __init time_init(void)
do_gtod.varp = &do_gtod.vars[0];
do_gtod.var_idx = 0;
do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
__get_cpu_var(last_jiffy) = tb_last_stamp;
__get_cpu_var(last_jiffy) = tb_last_jiffy;
do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
do_gtod.varp->tb_to_xs = tb_to_xs;
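/*
 * Editor's sketch: with tb_last_stamp removed, the per-cpu last_jiffy and the
 * file-local tb_last_jiffy are both full 64-bit timebase values, so the jiffy
 * advance in timer_interrupt() reduces to a plain u64 comparison.  Standalone
 * rendering; names follow the hunks above, the numbers are made up:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t advance_tb_last_jiffy(uint64_t last_jiffy_this_cpu,
				      uint64_t tb_last_jiffy,
				      uint64_t tb_ticks_per_jiffy)
{
	uint64_t tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;

	if (last_jiffy_this_cpu >= tb_next_jiffy)	/* a full jiffy has elapsed */
		tb_last_jiffy = tb_next_jiffy;		/* so the global stamp moves on */
	return tb_last_jiffy;
}

int main(void)
{
	/* this cpu has reached tick 3100, past the boundary at 3000: prints 3000 */
	printf("%llu\n", (unsigned long long)advance_tb_last_jiffy(3100, 2000, 1000));
	return 0;
}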


@ -11,6 +11,7 @@
.align 7
_GLOBAL(memcpy)
std r3,48(r1) /* save destination pointer for return value */
mtcrf 0x01,r5
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
@ -38,7 +39,7 @@ _GLOBAL(memcpy)
stdu r9,16(r3)
bdnz 1b
3: std r8,8(r3)
beqlr
beq 3f
addi r3,r3,16
ld r9,8(r4)
.Ldo_tail:
@ -53,7 +54,8 @@ _GLOBAL(memcpy)
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
3: blr
3: ld r3,48(r1) /* return dest pointer */
blr
.Lsrc_unaligned:
srdi r6,r5,3
@ -115,7 +117,7 @@ _GLOBAL(memcpy)
5: srd r12,r9,r11
or r12,r8,r12
std r12,24(r3)
beqlr
beq 4f
cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
@ -167,4 +169,5 @@ _GLOBAL(memcpy)
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
4: blr
4: ld r3,48(r1) /* return dest pointer */
blr
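/*
 * Editor's note: every early-exit path in the 64-bit memcpy above now reloads
 * the destination pointer saved at 48(r1), because memcpy() is required to
 * return its original dest argument.  Equivalent C, as a sketch only:
 */
#include <stddef.h>

static void *memcpy_sketch(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	while (n--)
		*d++ = *s++;
	return dest;	/* the value the patched exit paths now restore into r3 */
}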


@ -103,7 +103,7 @@ unsigned long __init mmu_mapin_ram(void)
/* Determine number of entries necessary to cover lowmem */
pinned_tlbs = (unsigned int)
(_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
(_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
/* Write upper watermark to save location */
tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
@ -111,7 +111,7 @@ unsigned long __init mmu_mapin_ram(void)
/* If necessary, set additional pinned TLBs */
if (pinned_tlbs > 1)
for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
}


@ -46,26 +46,6 @@ unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
#endif
#ifdef CONFIG_PCI
static int
mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
static char pci_irq_table[][4] =
/*
* PCI IDSEL/INTPIN->INTLINE
* A B C D
*/
{
{PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x0e */
{PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x0f */
{PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x10 */
};
const long min_idsel = 0x0e, max_idsel = 0x10, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP;
}
#endif /* CONFIG_PCI */
/* ************************************************************************
*
* Setup the architecture
@ -92,8 +72,6 @@ static void __init mpc834x_itx_setup_arch(void)
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_map_irq = mpc83xx_map_irq;
ppc_md.pci_exclude_device = mpc83xx_exclude_device;
#endif
@ -106,25 +84,13 @@ static void __init mpc834x_itx_setup_arch(void)
void __init mpc834x_itx_init_IRQ(void)
{
u8 senses[8] = {
0, /* EXT 0 */
IRQ_SENSE_LEVEL, /* EXT 1 */
IRQ_SENSE_LEVEL, /* EXT 2 */
0, /* EXT 3 */
#ifdef CONFIG_PCI
IRQ_SENSE_LEVEL, /* EXT 4 */
IRQ_SENSE_LEVEL, /* EXT 5 */
IRQ_SENSE_LEVEL, /* EXT 6 */
IRQ_SENSE_LEVEL, /* EXT 7 */
#else
0, /* EXT 4 */
0, /* EXT 5 */
0, /* EXT 6 */
0, /* EXT 7 */
#endif
};
struct device_node *np;
ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8);
np = of_find_node_by_type(NULL, "ipic");
if (!np)
return;
ipic_init(np, 0);
/* Initialize the default interrupt mapping priorities,
* in case the boot rom changed something on us.
@ -153,4 +119,7 @@ define_machine(mpc834x_itx) {
.time_init = mpc83xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup = mpc83xx_pcibios_fixup,
#endif
};


@ -43,33 +43,6 @@ unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
#endif
#ifdef CONFIG_PCI
static int
mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
static char pci_irq_table[][4] =
/*
* PCI IDSEL/INTPIN->INTLINE
* A B C D
*/
{
{PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x11 */
{PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x12 */
{PIRQD, PIRQA, PIRQB, PIRQC}, /* idsel 0x13 */
{0, 0, 0, 0},
{PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x15 */
{PIRQD, PIRQA, PIRQB, PIRQC}, /* idsel 0x16 */
{PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x17 */
{PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x18 */
{0, 0, 0, 0}, /* idsel 0x19 */
{0, 0, 0, 0}, /* idsel 0x20 */
};
const long min_idsel = 0x11, max_idsel = 0x20, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP;
}
#endif /* CONFIG_PCI */
/* ************************************************************************
*
* Setup the architecture
@ -96,8 +69,6 @@ static void __init mpc834x_sys_setup_arch(void)
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_map_irq = mpc83xx_map_irq;
ppc_md.pci_exclude_device = mpc83xx_exclude_device;
#endif
@ -110,25 +81,13 @@ static void __init mpc834x_sys_setup_arch(void)
void __init mpc834x_sys_init_IRQ(void)
{
u8 senses[8] = {
0, /* EXT 0 */
IRQ_SENSE_LEVEL, /* EXT 1 */
IRQ_SENSE_LEVEL, /* EXT 2 */
0, /* EXT 3 */
#ifdef CONFIG_PCI
IRQ_SENSE_LEVEL, /* EXT 4 */
IRQ_SENSE_LEVEL, /* EXT 5 */
IRQ_SENSE_LEVEL, /* EXT 6 */
IRQ_SENSE_LEVEL, /* EXT 7 */
#else
0, /* EXT 4 */
0, /* EXT 5 */
0, /* EXT 6 */
0, /* EXT 7 */
#endif
};
struct device_node *np;
ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8);
np = of_find_node_by_type(NULL, "ipic");
if (!np)
return;
ipic_init(np, 0);
/* Initialize the default interrupt mapping priorities,
* in case the boot rom changed something on us.
@ -178,4 +137,7 @@ define_machine(mpc834x_sys) {
.time_init = mpc83xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup = mpc83xx_pcibios_fixup,
#endif
};


@ -11,6 +11,7 @@
extern int add_bridge(struct device_node *dev);
extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
extern void mpc83xx_pcibios_fixup(void);
extern void mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);


@ -45,6 +45,15 @@ int mpc83xx_exclude_device(u_char bus, u_char devfn)
return PCIBIOS_SUCCESSFUL;
}
void __init mpc83xx_pcibios_fixup(void)
{
struct pci_dev *dev = NULL;
/* map all the PCI irqs */
for_each_pci_dev(dev)
pci_read_irq_line(dev);
}
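/*
 * Editor's note: for_each_pci_dev() walks every PCI device known to the core
 * and pci_read_irq_line() resolves each device's interrupt through the new
 * device-tree irq mapping, which is why the hand-written IDSEL/INTPIN routing
 * tables are dropped from the mpc834x board files elsewhere in this merge.
 */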
int __init add_bridge(struct device_node *dev)
{
int len;


@ -52,6 +52,7 @@ unsigned long pci_dram_offset = 0;
#endif
#ifdef CONFIG_PCI
static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
@ -60,40 +61,43 @@ static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc,
generic_handle_irq(cascade_irq, regs);
desc->chip->eoi(irq);
}
#endif /* CONFIG_PCI */
void __init
mpc86xx_hpcn_init_irq(void)
{
struct mpic *mpic1;
struct device_node *np, *cascade_node = NULL;
struct device_node *np;
struct resource res;
#ifdef CONFIG_PCI
struct device_node *cascade_node = NULL;
int cascade_irq;
phys_addr_t openpic_paddr;
#endif
/* Determine PIC address. */
np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL)
return;
/* Determine the Physical Address of the OpenPIC regs */
openpic_paddr = get_immrbase() + MPC86xx_OPENPIC_OFFSET;
of_address_to_resource(np, 0, &res);
/* Alloc mpic structure and per isu has 16 INT entries. */
mpic1 = mpic_alloc(np, openpic_paddr,
mpic1 = mpic_alloc(np, res.start,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
16, NR_IRQS - 4,
" MPIC ");
BUG_ON(mpic1 == NULL);
mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10000);
mpic_assign_isu(mpic1, 0, res.start + 0x10000);
/* 48 Internal Interrupts */
mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10200);
mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10400);
mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10600);
mpic_assign_isu(mpic1, 1, res.start + 0x10200);
mpic_assign_isu(mpic1, 2, res.start + 0x10400);
mpic_assign_isu(mpic1, 3, res.start + 0x10600);
/* 16 External interrupts
* Moving them from [0 - 15] to [64 - 79]
*/
mpic_assign_isu(mpic1, 4, openpic_paddr + 0x10000);
mpic_assign_isu(mpic1, 4, res.start + 0x10000);
mpic_init(mpic1);


@ -188,7 +188,8 @@ int __init add_bridge(struct device_node *dev)
printk(KERN_INFO "Found MPC86xx PCIE host bridge at 0x%08lx. "
"Firmware bus number: %d->%d\n",
rsrc.start, hose->first_busno, hose->last_busno);
(unsigned long) rsrc.start,
hose->first_busno, hose->last_busno);
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);


@ -80,6 +80,7 @@ config MPC7448HPC2
select DEFAULT_UIMAGE
select PPC_UDBG_16550
select MPIC
select MPIC_WEIRD
help
Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga)
platform


@ -215,7 +215,7 @@ static void __init mpc7448_hpc2_init_IRQ(void)
mpic = mpic_alloc(tsi_pic, mpic_paddr,
MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108),
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
0, /* num_sources used */
0, /* num_sources used */
"Tsi108_PIC");


@ -256,7 +256,7 @@ static struct pmf_handlers macio_mmio_handlers = {
.write_reg32 = macio_do_write_reg32,
.read_reg32 = macio_do_read_reg32,
.write_reg8 = macio_do_write_reg8,
.read_reg32 = macio_do_read_reg8,
.read_reg8 = macio_do_read_reg8,
.read_reg32_msrx = macio_do_read_reg32_msrx,
.read_reg8_msrx = macio_do_read_reg8_msrx,
.write_reg32_slm = macio_do_write_reg32_slm,


@ -87,8 +87,8 @@ static void __pmac_retrigger(unsigned int irq_nr)
static void pmac_mask_and_ack_irq(unsigned int virq)
{
unsigned int src = irq_map[virq].hwirq;
unsigned long bit = 1UL << (virq & 0x1f);
int i = virq >> 5;
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
spin_lock_irqsave(&pmac_pic_lock, flags);
@ -175,7 +175,7 @@ static void pmac_mask_irq(unsigned int virq)
spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
__pmac_set_irq_mask(src, 1);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
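/*
 * Editor's note: pmac_mask_and_ack_irq() above now derives its bit index into
 * ppc_cached_irq_mask from the hardware source number (irq_map[virq].hwirq)
 * rather than from the Linux virtual irq, which no longer coincides with it
 * under the new irq remapping.
 */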


@ -9,11 +9,11 @@ obj-$(CONFIG_BOOKE) += dcr.o
obj-$(CONFIG_40x) += dcr.o
obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_PPC_83xx) += ipic.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o
obj-$(CONFIG_PPC_TODC) += todc.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
ifeq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_I8259) += i8259.o
endif
obj-$(CONFIG_PPC_83xx) += ipic.o
endif


@ -19,15 +19,18 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/ipic.h>
#include <asm/mpc83xx.h>
#include "ipic.h"
static struct ipic p_ipic;
static struct ipic * primary_ipic;
static DEFINE_SPINLOCK(ipic_lock);
static struct ipic_info ipic_info[] = {
[9] = {
@ -373,74 +376,220 @@ static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32
out_be32(base + (reg >> 2), value);
}
static inline struct ipic * ipic_from_irq(unsigned int irq)
static inline struct ipic * ipic_from_irq(unsigned int virq)
{
return primary_ipic;
}
static void ipic_enable_irq(unsigned int irq)
#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
static void ipic_unmask_irq(unsigned int virq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
unsigned long flags;
u32 temp;
spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_disable_irq(unsigned int irq)
static void ipic_mask_irq(unsigned int virq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
unsigned long flags;
u32 temp;
spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_disable_irq_and_ack(unsigned int irq)
static void ipic_ack_irq(unsigned int virq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
unsigned long flags;
u32 temp;
ipic_disable_irq(irq);
spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].pend);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].pend, temp);
spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_end_irq(unsigned int irq)
static void ipic_mask_irq_and_ack(unsigned int virq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
ipic_enable_irq(irq);
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
unsigned long flags;
u32 temp;
spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
temp = ipic_read(ipic->regs, ipic_info[src].pend);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].pend, temp);
spin_unlock_irqrestore(&ipic_lock, flags);
}
struct hw_interrupt_type ipic = {
.typename = " IPIC ",
.enable = ipic_enable_irq,
.disable = ipic_disable_irq,
.ack = ipic_disable_irq_and_ack,
.end = ipic_end_irq,
static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
{
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
struct irq_desc *desc = get_irq_desc(virq);
unsigned int vold, vnew, edibit;
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
/* ipic supports only low assertion and high-to-low change senses
*/
if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
flow_type);
return -EINVAL;
}
desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
desc->status |= IRQ_LEVEL;
set_irq_handler(virq, handle_level_irq);
} else {
set_irq_handler(virq, handle_edge_irq);
}
/* only EXT IRQ senses are programmable on ipic
* internal IRQ senses are LEVEL_LOW
*/
if (src == IPIC_IRQ_EXT0)
edibit = 15;
else
if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
edibit = (14 - (src - IPIC_IRQ_EXT1));
else
return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
vold = ipic_read(ipic->regs, IPIC_SECNR);
if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
vnew = vold | (1 << edibit);
} else {
vnew = vold & ~(1 << edibit);
}
if (vold != vnew)
ipic_write(ipic->regs, IPIC_SECNR, vnew);
return 0;
}
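/*
 * Editor's sketch of the SECNR bit selection in ipic_set_irq_type() above:
 * external source EXT0 (hwirq 48) uses bit 15 and EXT1..EXT7 (hwirqs 17..23)
 * use bits 14 down to 8; setting the bit makes that external irq
 * falling-edge sensitive instead of low-level sensitive.  Standalone
 * illustration using the IPIC_IRQ_EXT* values from ipic.h:
 */
#include <stdio.h>

#define IPIC_IRQ_EXT0	48
#define IPIC_IRQ_EXT1	17
#define IPIC_IRQ_EXT7	23

static int ext_irq_to_edibit(unsigned int src)
{
	if (src == IPIC_IRQ_EXT0)
		return 15;
	if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
		return 14 - (src - IPIC_IRQ_EXT1);
	return -1;	/* internal sources: sense is fixed low-level */
}

int main(void)
{
	/* prints: EXT0 -> bit 15, EXT1 -> bit 14, EXT7 -> bit 8 */
	printf("EXT0 -> bit %d, EXT1 -> bit %d, EXT7 -> bit %d\n",
	       ext_irq_to_edibit(48), ext_irq_to_edibit(17), ext_irq_to_edibit(23));
	return 0;
}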
static struct irq_chip ipic_irq_chip = {
.typename = " IPIC ",
.unmask = ipic_unmask_irq,
.mask = ipic_mask_irq,
.mask_ack = ipic_mask_irq_and_ack,
.ack = ipic_ack_irq,
.set_type = ipic_set_irq_type,
};
void __init ipic_init(phys_addr_t phys_addr,
unsigned int flags,
unsigned int irq_offset,
unsigned char *senses,
unsigned int senses_count)
static int ipic_host_match(struct irq_host *h, struct device_node *node)
{
u32 i, temp = 0;
struct ipic *ipic = h->host_data;
primary_ipic = &p_ipic;
primary_ipic->regs = ioremap(phys_addr, MPC83xx_IPIC_SIZE);
/* Exact match, unless ipic node is NULL */
return ipic->of_node == NULL || ipic->of_node == node;
}
primary_ipic->irq_offset = irq_offset;
static int ipic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ipic *ipic = h->host_data;
struct irq_chip *chip;
ipic_write(primary_ipic->regs, IPIC_SICNR, 0x0);
/* Default chip */
chip = &ipic->hc_irq;
set_irq_chip_data(virq, ipic);
set_irq_chip_and_handler(virq, chip, handle_level_irq);
/* Set default irq type */
set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
/* interrupt sense values coming from the device tree equal either
* LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
*/
*out_hwirq = intspec[0];
if (intsize > 1)
*out_flags = intspec[1];
else
*out_flags = IRQ_TYPE_NONE;
return 0;
}
static struct irq_host_ops ipic_host_ops = {
.match = ipic_host_match,
.map = ipic_host_map,
.xlate = ipic_host_xlate,
};
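/*
 * Editor's note: with this irq_host in place, a two-cell "interrupts"
 * property is decoded by ipic_host_xlate() as <hwirq sense>; a one-cell
 * specifier falls back to IRQ_TYPE_NONE, which ipic_set_irq_type() above
 * then treats as low-level sensitive.
 */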
void __init ipic_init(struct device_node *node,
unsigned int flags)
{
struct ipic *ipic;
struct resource res;
u32 temp = 0, ret;
ipic = alloc_bootmem(sizeof(struct ipic));
if (ipic == NULL)
return;
memset(ipic, 0, sizeof(struct ipic));
ipic->of_node = node ? of_node_get(node) : NULL;
ipic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
NR_IPIC_INTS,
&ipic_host_ops, 0);
if (ipic->irqhost == NULL) {
of_node_put(node);
return;
}
ret = of_address_to_resource(node, 0, &res);
if (ret)
return;
ipic->regs = ioremap(res.start, res.end - res.start + 1);
ipic->irqhost->host_data = ipic;
ipic->hc_irq = ipic_irq_chip;
/* init hw */
ipic_write(ipic->regs, IPIC_SICNR, 0x0);
/* default priority scheme is grouped. If spread mode is required
* configure SICFR accordingly */
@ -453,49 +602,35 @@ void __init ipic_init(phys_addr_t phys_addr,
if (flags & IPIC_SPREADMODE_MIX_B)
temp |= SICFR_MPSB;
ipic_write(primary_ipic->regs, IPIC_SICNR, temp);
ipic_write(ipic->regs, IPIC_SICNR, temp);
/* handle MCP route */
temp = 0;
if (flags & IPIC_DISABLE_MCP_OUT)
temp = SERCR_MCPR;
ipic_write(primary_ipic->regs, IPIC_SERCR, temp);
ipic_write(ipic->regs, IPIC_SERCR, temp);
/* handle routing of IRQ0 to MCP */
temp = ipic_read(primary_ipic->regs, IPIC_SEMSR);
temp = ipic_read(ipic->regs, IPIC_SEMSR);
if (flags & IPIC_IRQ0_MCP)
temp |= SEMSR_SIRQ0;
else
temp &= ~SEMSR_SIRQ0;
ipic_write(primary_ipic->regs, IPIC_SEMSR, temp);
ipic_write(ipic->regs, IPIC_SEMSR, temp);
for (i = 0 ; i < NR_IPIC_INTS ; i++) {
irq_desc[i+irq_offset].chip = &ipic;
irq_desc[i+irq_offset].status = IRQ_LEVEL;
}
primary_ipic = ipic;
irq_set_default_host(primary_ipic->irqhost);
temp = 0;
for (i = 0 ; i < senses_count ; i++) {
if ((senses[i] & IRQ_SENSE_MASK) == IRQ_SENSE_EDGE) {
temp |= 1 << (15 - i);
if (i != 0)
irq_desc[i + irq_offset + MPC83xx_IRQ_EXT1 - 1].status = 0;
else
irq_desc[irq_offset + MPC83xx_IRQ_EXT0].status = 0;
}
}
ipic_write(primary_ipic->regs, IPIC_SECNR, temp);
printk ("IPIC (%d IRQ sources, %d External IRQs) at %p\n", NR_IPIC_INTS,
senses_count, primary_ipic->regs);
printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
primary_ipic->regs);
}
int ipic_set_priority(unsigned int irq, unsigned int priority)
int ipic_set_priority(unsigned int virq, unsigned int priority)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
u32 temp;
if (priority > 7)
@ -520,10 +655,10 @@ int ipic_set_priority(unsigned int irq, unsigned int priority)
return 0;
}
void ipic_set_highest_priority(unsigned int irq)
void ipic_set_highest_priority(unsigned int virq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
u32 temp;
temp = ipic_read(ipic->regs, IPIC_SICFR);
@ -537,37 +672,10 @@ void ipic_set_highest_priority(unsigned int irq)
void ipic_set_default_priority(void)
{
ipic_set_priority(MPC83xx_IRQ_TSEC1_TX, 0);
ipic_set_priority(MPC83xx_IRQ_TSEC1_RX, 1);
ipic_set_priority(MPC83xx_IRQ_TSEC1_ERROR, 2);
ipic_set_priority(MPC83xx_IRQ_TSEC2_TX, 3);
ipic_set_priority(MPC83xx_IRQ_TSEC2_RX, 4);
ipic_set_priority(MPC83xx_IRQ_TSEC2_ERROR, 5);
ipic_set_priority(MPC83xx_IRQ_USB2_DR, 6);
ipic_set_priority(MPC83xx_IRQ_USB2_MPH, 7);
ipic_set_priority(MPC83xx_IRQ_UART1, 0);
ipic_set_priority(MPC83xx_IRQ_UART2, 1);
ipic_set_priority(MPC83xx_IRQ_SEC2, 2);
ipic_set_priority(MPC83xx_IRQ_IIC1, 5);
ipic_set_priority(MPC83xx_IRQ_IIC2, 6);
ipic_set_priority(MPC83xx_IRQ_SPI, 7);
ipic_set_priority(MPC83xx_IRQ_RTC_SEC, 0);
ipic_set_priority(MPC83xx_IRQ_PIT, 1);
ipic_set_priority(MPC83xx_IRQ_PCI1, 2);
ipic_set_priority(MPC83xx_IRQ_PCI2, 3);
ipic_set_priority(MPC83xx_IRQ_EXT0, 4);
ipic_set_priority(MPC83xx_IRQ_EXT1, 5);
ipic_set_priority(MPC83xx_IRQ_EXT2, 6);
ipic_set_priority(MPC83xx_IRQ_EXT3, 7);
ipic_set_priority(MPC83xx_IRQ_RTC_ALR, 0);
ipic_set_priority(MPC83xx_IRQ_MU, 1);
ipic_set_priority(MPC83xx_IRQ_SBA, 2);
ipic_set_priority(MPC83xx_IRQ_DMA, 3);
ipic_set_priority(MPC83xx_IRQ_EXT4, 4);
ipic_set_priority(MPC83xx_IRQ_EXT5, 5);
ipic_set_priority(MPC83xx_IRQ_EXT6, 6);
ipic_set_priority(MPC83xx_IRQ_EXT7, 7);
ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_SIPRR_A_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_SIPRR_D_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_SMPRR_A_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_SMPRR_B_DEFAULT);
}
void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
@ -600,17 +708,20 @@ void ipic_clear_mcp_status(u32 mask)
ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}
/* Return an interrupt vector or -1 if no interrupt is pending. */
int ipic_get_irq(struct pt_regs *regs)
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int ipic_get_irq(struct pt_regs *regs)
{
int irq;
irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & 0x7f;
BUG_ON(primary_ipic == NULL);
#define IPIC_SIVCR_VECTOR_MASK 0x7f
irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
if (irq == 0) /* 0 --> no irq is pending */
irq = -1;
return NO_IRQ;
return irq;
return irq_linear_revmap(primary_ipic->irqhost, irq);
}
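/*
 * Editor's note: ipic_get_irq() now returns NO_IRQ instead of -1 when vector
 * 0 is read back, and translates the raw hardware vector through
 * irq_linear_revmap() so callers receive a virtual irq from the host created
 * in ipic_init().
 */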
static struct sysdev_class ipic_sysclass = {


@ -15,7 +15,18 @@
#include <asm/ipic.h>
#define MPC83xx_IPIC_SIZE (0x00100)
#define NR_IPIC_INTS 128
/* External IRQS */
#define IPIC_IRQ_EXT0 48
#define IPIC_IRQ_EXT1 17
#define IPIC_IRQ_EXT7 23
/* Default Priority Registers */
#define IPIC_SIPRR_A_DEFAULT 0x05309770
#define IPIC_SIPRR_D_DEFAULT 0x05309770
#define IPIC_SMPRR_A_DEFAULT 0x05309770
#define IPIC_SMPRR_B_DEFAULT 0x05309770
/* System Global Interrupt Configuration Register */
#define SICFR_IPSA 0x00010000
@ -31,7 +42,15 @@
struct ipic {
volatile u32 __iomem *regs;
unsigned int irq_offset;
/* The remapper for this IPIC */
struct irq_host *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
/* The device node of the interrupt controller */
struct device_node *of_node;
};
struct ipic_info {


@ -54,6 +54,94 @@ static DEFINE_SPINLOCK(mpic_lock);
#endif
#endif
#ifdef CONFIG_MPIC_WEIRD
static u32 mpic_infos[][MPIC_IDX_END] = {
[0] = { /* Original OpenPIC compatible MPIC */
MPIC_GREG_BASE,
MPIC_GREG_FEATURE_0,
MPIC_GREG_GLOBAL_CONF_0,
MPIC_GREG_VENDOR_ID,
MPIC_GREG_IPI_VECTOR_PRI_0,
MPIC_GREG_IPI_STRIDE,
MPIC_GREG_SPURIOUS,
MPIC_GREG_TIMER_FREQ,
MPIC_TIMER_BASE,
MPIC_TIMER_STRIDE,
MPIC_TIMER_CURRENT_CNT,
MPIC_TIMER_BASE_CNT,
MPIC_TIMER_VECTOR_PRI,
MPIC_TIMER_DESTINATION,
MPIC_CPU_BASE,
MPIC_CPU_STRIDE,
MPIC_CPU_IPI_DISPATCH_0,
MPIC_CPU_IPI_DISPATCH_STRIDE,
MPIC_CPU_CURRENT_TASK_PRI,
MPIC_CPU_WHOAMI,
MPIC_CPU_INTACK,
MPIC_CPU_EOI,
MPIC_IRQ_BASE,
MPIC_IRQ_STRIDE,
MPIC_IRQ_VECTOR_PRI,
MPIC_VECPRI_VECTOR_MASK,
MPIC_VECPRI_POLARITY_POSITIVE,
MPIC_VECPRI_POLARITY_NEGATIVE,
MPIC_VECPRI_SENSE_LEVEL,
MPIC_VECPRI_SENSE_EDGE,
MPIC_VECPRI_POLARITY_MASK,
MPIC_VECPRI_SENSE_MASK,
MPIC_IRQ_DESTINATION
},
[1] = { /* Tsi108/109 PIC */
TSI108_GREG_BASE,
TSI108_GREG_FEATURE_0,
TSI108_GREG_GLOBAL_CONF_0,
TSI108_GREG_VENDOR_ID,
TSI108_GREG_IPI_VECTOR_PRI_0,
TSI108_GREG_IPI_STRIDE,
TSI108_GREG_SPURIOUS,
TSI108_GREG_TIMER_FREQ,
TSI108_TIMER_BASE,
TSI108_TIMER_STRIDE,
TSI108_TIMER_CURRENT_CNT,
TSI108_TIMER_BASE_CNT,
TSI108_TIMER_VECTOR_PRI,
TSI108_TIMER_DESTINATION,
TSI108_CPU_BASE,
TSI108_CPU_STRIDE,
TSI108_CPU_IPI_DISPATCH_0,
TSI108_CPU_IPI_DISPATCH_STRIDE,
TSI108_CPU_CURRENT_TASK_PRI,
TSI108_CPU_WHOAMI,
TSI108_CPU_INTACK,
TSI108_CPU_EOI,
TSI108_IRQ_BASE,
TSI108_IRQ_STRIDE,
TSI108_IRQ_VECTOR_PRI,
TSI108_VECPRI_VECTOR_MASK,
TSI108_VECPRI_POLARITY_POSITIVE,
TSI108_VECPRI_POLARITY_NEGATIVE,
TSI108_VECPRI_SENSE_LEVEL,
TSI108_VECPRI_SENSE_EDGE,
TSI108_VECPRI_POLARITY_MASK,
TSI108_VECPRI_SENSE_MASK,
TSI108_IRQ_DESTINATION
},
};
#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]
#else /* CONFIG_MPIC_WEIRD */
#define MPIC_INFO(name) MPIC_##name
#endif /* CONFIG_MPIC_WEIRD */
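/*
 * Editor's sketch: MPIC_INFO(name) expands either to a per-controller table
 * lookup (mpic->hw_set[MPIC_IDX_name], under CONFIG_MPIC_WEIRD) or to the
 * original compile-time MPIC_name constant.  The same pattern in a tiny
 * standalone form, with made-up register offsets:
 */
#include <stdio.h>

enum { IDX_GREG_BASE, IDX_END };

struct pic { const unsigned int *hw_set; };

static const unsigned int openpic_regs[IDX_END] = { 0x01000 };	/* illustrative */
static const unsigned int tsi108_regs[IDX_END]  = { 0x00000 };	/* illustrative */

#define PIC_INFO(p, name) ((p)->hw_set[IDX_##name])

int main(void)
{
	struct pic openpic = { openpic_regs }, tsi = { tsi108_regs };

	printf("%#x %#x\n", PIC_INFO(&openpic, GREG_BASE), PIC_INFO(&tsi, GREG_BASE));
	return 0;
}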
/*
* Register accessor functions
*/
@ -80,7 +168,8 @@ static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
(ipi * MPIC_INFO(GREG_IPI_STRIDE));
if (mpic->flags & MPIC_BROKEN_IPI)
be = !be;
@ -89,7 +178,8 @@ static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
(ipi * MPIC_INFO(GREG_IPI_STRIDE));
_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
}
@ -120,7 +210,7 @@ static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigne
unsigned int idx = src_no & mpic->isu_mask;
return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
reg + (idx * MPIC_IRQ_STRIDE));
reg + (idx * MPIC_INFO(IRQ_STRIDE)));
}
static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
@ -130,7 +220,7 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
unsigned int idx = src_no & mpic->isu_mask;
_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
reg + (idx * MPIC_IRQ_STRIDE), value);
reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
}
#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
@ -156,8 +246,8 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
u32 r;
mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);
mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
@ -394,8 +484,8 @@ static inline struct mpic * mpic_from_irq(unsigned int irq)
/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
mpic_cpu_write(MPIC_CPU_EOI, 0);
(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
(void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}
#ifdef CONFIG_SMP
@ -419,8 +509,8 @@ static void mpic_unmask_irq(unsigned int irq)
DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
~MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
@ -428,7 +518,7 @@ static void mpic_unmask_irq(unsigned int irq)
printk(KERN_ERR "mpic_enable_irq timeout\n");
break;
}
} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}
static void mpic_mask_irq(unsigned int irq)
@ -439,8 +529,8 @@ static void mpic_mask_irq(unsigned int irq)
DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
@ -449,7 +539,7 @@ static void mpic_mask_irq(unsigned int irq)
printk(KERN_ERR "mpic_enable_irq timeout\n");
break;
}
} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}
static void mpic_end_irq(unsigned int irq)
@ -560,24 +650,28 @@ static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
cpus_and(tmp, cpumask, cpu_online_map);
mpic_irq_write(src, MPIC_IRQ_DESTINATION,
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
mpic_physmask(cpus_addr(tmp)[0]));
}
static unsigned int mpic_type_to_vecpri(unsigned int type)
static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
{
/* Now convert sense value */
switch(type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
return MPIC_VECPRI_SENSE_EDGE | MPIC_VECPRI_POLARITY_POSITIVE;
return MPIC_INFO(VECPRI_SENSE_EDGE) |
MPIC_INFO(VECPRI_POLARITY_POSITIVE);
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
return MPIC_VECPRI_SENSE_EDGE | MPIC_VECPRI_POLARITY_NEGATIVE;
return MPIC_INFO(VECPRI_SENSE_EDGE) |
MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
case IRQ_TYPE_LEVEL_HIGH:
return MPIC_VECPRI_SENSE_LEVEL | MPIC_VECPRI_POLARITY_POSITIVE;
return MPIC_INFO(VECPRI_SENSE_LEVEL) |
MPIC_INFO(VECPRI_POLARITY_POSITIVE);
case IRQ_TYPE_LEVEL_LOW:
default:
return MPIC_VECPRI_SENSE_LEVEL | MPIC_VECPRI_POLARITY_NEGATIVE;
return MPIC_INFO(VECPRI_SENSE_LEVEL) |
MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
}
}
@ -609,13 +703,14 @@ static int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
MPIC_VECPRI_SENSE_EDGE;
else
vecpri = mpic_type_to_vecpri(flow_type);
vecpri = mpic_type_to_vecpri(mpic, flow_type);
vold = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
vnew = vold & ~(MPIC_VECPRI_POLARITY_MASK | MPIC_VECPRI_SENSE_MASK);
vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
MPIC_INFO(VECPRI_SENSE_MASK));
vnew |= vecpri;
if (vold != vnew)
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, vnew);
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
return 0;
}
@ -798,17 +893,22 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->irq_count = irq_count;
mpic->num_sources = 0; /* so far */
#ifdef CONFIG_MPIC_WEIRD
mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
#endif
/* Map the global registers */
mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
mpic->gregs = ioremap(phys_addr + MPIC_INFO(GREG_BASE), 0x1000);
mpic->tmregs = mpic->gregs +
((MPIC_INFO(TIMER_BASE) - MPIC_INFO(GREG_BASE)) >> 2);
BUG_ON(mpic->gregs == NULL);
/* Reset */
if (flags & MPIC_WANTS_RESET) {
mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_RESET);
while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
& MPIC_GREG_GCONF_RESET)
mb();
}
@ -817,7 +917,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* MPICs, num sources as well. On ISU MPICs, sources are counted
* as ISUs are added
*/
reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
reg = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
>> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
if (isu_size == 0)
@ -826,16 +926,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Map the per-CPU registers */
for (i = 0; i < mpic->num_cpus; i++) {
mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
i * MPIC_CPU_STRIDE, 0x1000);
mpic->cpuregs[i] = ioremap(phys_addr + MPIC_INFO(CPU_BASE) +
i * MPIC_INFO(CPU_STRIDE), 0x1000);
BUG_ON(mpic->cpuregs[i] == NULL);
}
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
MPIC_IRQ_STRIDE * mpic->isu_size);
mpic->isus[0] = ioremap(phys_addr + MPIC_INFO(IRQ_BASE),
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
BUG_ON(mpic->isus[0] == NULL);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
@ -879,7 +979,8 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
BUG_ON(isu_num >= MPIC_MAX_ISU);
mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
mpic->isus[isu_num] = ioremap(phys_addr,
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
@ -904,14 +1005,16 @@ void __init mpic_init(struct mpic *mpic)
printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
/* Initialize timers: just disable them all */
for (i = 0; i < 4; i++) {
mpic_write(mpic->tmregs,
i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
i * MPIC_INFO(TIMER_STRIDE) +
MPIC_INFO(TIMER_DESTINATION), 0);
mpic_write(mpic->tmregs,
i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
i * MPIC_INFO(TIMER_STRIDE) +
MPIC_INFO(TIMER_VECTOR_PRI),
MPIC_VECPRI_MASK |
(MPIC_VEC_TIMER_0 + i));
}
@ -940,21 +1043,22 @@ void __init mpic_init(struct mpic *mpic)
(8 << MPIC_VECPRI_PRIORITY_SHIFT);
/* init hw */
mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
1 << hard_smp_processor_id());
}
/* Init spurrious vector */
mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), MPIC_VEC_SPURRIOUS);
/* Disable 8259 passthrough */
mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
| MPIC_GREG_GCONF_8259_PTHROU_DIS);
/* Disable 8259 passthrough, if supported */
if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_8259_PTHROU_DIS);
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
}
void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
@ -997,9 +1101,9 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
mpic_ipi_write(src - MPIC_VEC_IPI_0,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else {
reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI)
reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
& ~MPIC_VECPRI_PRIORITY_MASK;
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
}
spin_unlock_irqrestore(&mpic_lock, flags);
@ -1017,7 +1121,7 @@ unsigned int mpic_irq_get_priority(unsigned int irq)
if (is_ipi)
reg = mpic_ipi_read(src = MPIC_VEC_IPI_0);
else
reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
spin_unlock_irqrestore(&mpic_lock, flags);
return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}
@ -1043,12 +1147,12 @@ void mpic_setup_this_cpu(void)
*/
if (distribute_irqs) {
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
}
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
@ -1058,7 +1162,7 @@ int mpic_cpu_get_priority(void)
{
struct mpic *mpic = mpic_primary;
return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
}
void mpic_cpu_set_priority(int prio)
@ -1066,7 +1170,7 @@ void mpic_cpu_set_priority(int prio)
struct mpic *mpic = mpic_primary;
prio &= MPIC_CPU_TASKPRI_MASK;
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
}
/*
@ -1088,11 +1192,11 @@ void mpic_teardown_this_cpu(int secondary)
/* let the mpic know we don't want intrs. */
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
spin_unlock_irqrestore(&mpic_lock, flags);
}
@ -1108,7 +1212,8 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif
mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
@ -1116,7 +1221,7 @@ unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
u32 src;
src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
src = mpic_cpu_read(MPIC_INFO(CPU_INTACK)) & MPIC_INFO(VECPRI_VECTOR_MASK);
#ifdef DEBUG_LOW
DBG("%s: get_one_irq(): %d\n", mpic->name, src);
#endif


@ -47,8 +47,9 @@ void __devinit
smp_generic_take_timebase( void )
{
int cmd, tbl, tbu;
unsigned long flags;
local_irq_disable();
local_irq_save(flags);
while( !running )
;
rmb();
@ -64,7 +65,7 @@ smp_generic_take_timebase( void )
tbu = tbsync->tbu;
tbsync->ack = 0;
if( cmd == kExit )
return;
break;
if( cmd == kSetAndTest ) {
while( tbsync->handshake )
@ -77,7 +78,7 @@ smp_generic_take_timebase( void )
}
enter_contest( tbsync->mark, -1 );
}
local_irq_enable();
local_irq_restore(flags);
}
static int __devinit


@ -93,7 +93,7 @@ obj-$(CONFIG_PCI) += pci_auto.o
endif
obj-$(CONFIG_RAPIDIO) += ppc85xx_rio.o
obj-$(CONFIG_83xx) += ppc83xx_setup.o ppc_sys.o \
mpc83xx_sys.o mpc83xx_devices.o
mpc83xx_sys.o mpc83xx_devices.o ipic.o
ifeq ($(CONFIG_83xx),y)
obj-$(CONFIG_PCI) += pci_auto.o
endif

arch/ppc/syslib/ipic.c (new file, 646 lines)

@ -0,0 +1,646 @@
/*
* include/asm-ppc/ipic.c
*
* IPIC routines implementations.
*
* Copyright 2005 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sysdev.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/ipic.h>
#include <asm/mpc83xx.h>
#include "ipic.h"
static struct ipic p_ipic;
static struct ipic * primary_ipic;
static struct ipic_info ipic_info[] = {
[9] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 24,
.prio_mask = 0,
},
[10] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 25,
.prio_mask = 1,
},
[11] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 26,
.prio_mask = 2,
},
[14] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 29,
.prio_mask = 5,
},
[15] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 30,
.prio_mask = 6,
},
[16] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 31,
.prio_mask = 7,
},
[17] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 1,
.prio_mask = 5,
},
[18] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 2,
.prio_mask = 6,
},
[19] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 3,
.prio_mask = 7,
},
[20] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 4,
.prio_mask = 4,
},
[21] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 5,
.prio_mask = 5,
},
[22] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 6,
.prio_mask = 6,
},
[23] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 7,
.prio_mask = 7,
},
[32] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 0,
.prio_mask = 0,
},
[33] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 1,
.prio_mask = 1,
},
[34] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 2,
.prio_mask = 2,
},
[35] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 3,
.prio_mask = 3,
},
[36] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 4,
.prio_mask = 4,
},
[37] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 5,
.prio_mask = 5,
},
[38] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 6,
.prio_mask = 6,
},
[39] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 7,
.prio_mask = 7,
},
[48] = {
.pend = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 0,
.prio_mask = 4,
},
[64] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 0,
.prio_mask = 0,
},
[65] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 1,
.prio_mask = 1,
},
[66] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 2,
.prio_mask = 2,
},
[67] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 3,
.prio_mask = 3,
},
[68] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 4,
.prio_mask = 0,
},
[69] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 5,
.prio_mask = 1,
},
[70] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 6,
.prio_mask = 2,
},
[71] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 7,
.prio_mask = 3,
},
[72] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 8,
},
[73] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 9,
},
[74] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 10,
},
[75] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 11,
},
[76] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 12,
},
[77] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 13,
},
[78] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 14,
},
[79] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 15,
},
[80] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 16,
},
[84] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 20,
},
[85] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 21,
},
[90] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 26,
},
[91] = {
.pend = IPIC_SIPNR_H,
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 27,
},
};
static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
return in_be32(base + (reg >> 2));
}
static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
out_be32(base + (reg >> 2), value);
}
static inline struct ipic * ipic_from_irq(unsigned int irq)
{
return primary_ipic;
}
static void ipic_enable_irq(unsigned int irq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
u32 temp;
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
}
static void ipic_disable_irq(unsigned int irq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
u32 temp;
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
}
static void ipic_disable_irq_and_ack(unsigned int irq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
u32 temp;
ipic_disable_irq(irq);
temp = ipic_read(ipic->regs, ipic_info[src].pend);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].pend, temp);
}
static void ipic_end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
ipic_enable_irq(irq);
}
struct hw_interrupt_type ipic = {
.typename = " IPIC ",
.enable = ipic_enable_irq,
.disable = ipic_disable_irq,
.ack = ipic_disable_irq_and_ack,
.end = ipic_end_irq,
};
void __init ipic_init(phys_addr_t phys_addr,
unsigned int flags,
unsigned int irq_offset,
unsigned char *senses,
unsigned int senses_count)
{
u32 i, temp = 0;
primary_ipic = &p_ipic;
primary_ipic->regs = ioremap(phys_addr, MPC83xx_IPIC_SIZE);
primary_ipic->irq_offset = irq_offset;
ipic_write(primary_ipic->regs, IPIC_SICNR, 0x0);
/* default priority scheme is grouped. If spread mode is required
* configure SICFR accordingly */
if (flags & IPIC_SPREADMODE_GRP_A)
temp |= SICFR_IPSA;
if (flags & IPIC_SPREADMODE_GRP_D)
temp |= SICFR_IPSD;
if (flags & IPIC_SPREADMODE_MIX_A)
temp |= SICFR_MPSA;
if (flags & IPIC_SPREADMODE_MIX_B)
temp |= SICFR_MPSB;
ipic_write(primary_ipic->regs, IPIC_SICNR, temp);
/* handle MCP route */
temp = 0;
if (flags & IPIC_DISABLE_MCP_OUT)
temp = SERCR_MCPR;
ipic_write(primary_ipic->regs, IPIC_SERCR, temp);
/* handle routing of IRQ0 to MCP */
temp = ipic_read(primary_ipic->regs, IPIC_SEMSR);
if (flags & IPIC_IRQ0_MCP)
temp |= SEMSR_SIRQ0;
else
temp &= ~SEMSR_SIRQ0;
ipic_write(primary_ipic->regs, IPIC_SEMSR, temp);
for (i = 0 ; i < NR_IPIC_INTS ; i++) {
irq_desc[i+irq_offset].chip = &ipic;
irq_desc[i+irq_offset].status = IRQ_LEVEL;
}
temp = 0;
for (i = 0 ; i < senses_count ; i++) {
if ((senses[i] & IRQ_SENSE_MASK) == IRQ_SENSE_EDGE) {
temp |= 1 << (15 - i);
if (i != 0)
irq_desc[i + irq_offset + MPC83xx_IRQ_EXT1 - 1].status = 0;
else
irq_desc[irq_offset + MPC83xx_IRQ_EXT0].status = 0;
}
}
ipic_write(primary_ipic->regs, IPIC_SECNR, temp);
printk ("IPIC (%d IRQ sources, %d External IRQs) at %p\n", NR_IPIC_INTS,
senses_count, primary_ipic->regs);
}
int ipic_set_priority(unsigned int irq, unsigned int priority)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
u32 temp;
if (priority > 7)
return -EINVAL;
if (src > 127)
return -EINVAL;
if (ipic_info[src].prio == 0)
return -EINVAL;
temp = ipic_read(ipic->regs, ipic_info[src].prio);
if (priority < 4) {
temp &= ~(0x7 << (20 + (3 - priority) * 3));
temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
} else {
temp &= ~(0x7 << (4 + (7 - priority) * 3));
temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
}
ipic_write(ipic->regs, ipic_info[src].prio, temp);
return 0;
}
void ipic_set_highest_priority(unsigned int irq)
{
struct ipic *ipic = ipic_from_irq(irq);
unsigned int src = irq - ipic->irq_offset;
u32 temp;
temp = ipic_read(ipic->regs, IPIC_SICFR);
/* clear and set HPI */
temp &= 0x7f000000;
temp |= (src & 0x7f) << 24;
ipic_write(ipic->regs, IPIC_SICFR, temp);
}
void ipic_set_default_priority(void)
{
ipic_set_priority(MPC83xx_IRQ_TSEC1_TX, 0);
ipic_set_priority(MPC83xx_IRQ_TSEC1_RX, 1);
ipic_set_priority(MPC83xx_IRQ_TSEC1_ERROR, 2);
ipic_set_priority(MPC83xx_IRQ_TSEC2_TX, 3);
ipic_set_priority(MPC83xx_IRQ_TSEC2_RX, 4);
ipic_set_priority(MPC83xx_IRQ_TSEC2_ERROR, 5);
ipic_set_priority(MPC83xx_IRQ_USB2_DR, 6);
ipic_set_priority(MPC83xx_IRQ_USB2_MPH, 7);
ipic_set_priority(MPC83xx_IRQ_UART1, 0);
ipic_set_priority(MPC83xx_IRQ_UART2, 1);
ipic_set_priority(MPC83xx_IRQ_SEC2, 2);
ipic_set_priority(MPC83xx_IRQ_IIC1, 5);
ipic_set_priority(MPC83xx_IRQ_IIC2, 6);
ipic_set_priority(MPC83xx_IRQ_SPI, 7);
ipic_set_priority(MPC83xx_IRQ_RTC_SEC, 0);
ipic_set_priority(MPC83xx_IRQ_PIT, 1);
ipic_set_priority(MPC83xx_IRQ_PCI1, 2);
ipic_set_priority(MPC83xx_IRQ_PCI2, 3);
ipic_set_priority(MPC83xx_IRQ_EXT0, 4);
ipic_set_priority(MPC83xx_IRQ_EXT1, 5);
ipic_set_priority(MPC83xx_IRQ_EXT2, 6);
ipic_set_priority(MPC83xx_IRQ_EXT3, 7);
ipic_set_priority(MPC83xx_IRQ_RTC_ALR, 0);
ipic_set_priority(MPC83xx_IRQ_MU, 1);
ipic_set_priority(MPC83xx_IRQ_SBA, 2);
ipic_set_priority(MPC83xx_IRQ_DMA, 3);
ipic_set_priority(MPC83xx_IRQ_EXT4, 4);
ipic_set_priority(MPC83xx_IRQ_EXT5, 5);
ipic_set_priority(MPC83xx_IRQ_EXT6, 6);
ipic_set_priority(MPC83xx_IRQ_EXT7, 7);
}
void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
{
struct ipic *ipic = primary_ipic;
u32 temp;
temp = ipic_read(ipic->regs, IPIC_SERMR);
temp |= (1 << (31 - mcp_irq));
ipic_write(ipic->regs, IPIC_SERMR, temp);
}
void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
{
struct ipic *ipic = primary_ipic;
u32 temp;
temp = ipic_read(ipic->regs, IPIC_SERMR);
temp &= (1 << (31 - mcp_irq));
ipic_write(ipic->regs, IPIC_SERMR, temp);
}
u32 ipic_get_mcp_status(void)
{
return ipic_read(primary_ipic->regs, IPIC_SERMR);
}
void ipic_clear_mcp_status(u32 mask)
{
ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}
/* Return an interrupt vector or -1 if no interrupt is pending. */
int ipic_get_irq(struct pt_regs *regs)
{
int irq;
irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & 0x7f;
if (irq == 0) /* 0 --> no irq is pending */
irq = -1;
return irq;
}
static struct sysdev_class ipic_sysclass = {
set_kset_name("ipic"),
};
static struct sys_device device_ipic = {
.id = 0,
.cls = &ipic_sysclass,
};
static int __init init_ipic_sysfs(void)
{
int rc;
if (!primary_ipic->regs)
return -ENODEV;
printk(KERN_DEBUG "Registering ipic with sysfs...\n");
rc = sysdev_class_register(&ipic_sysclass);
if (rc) {
printk(KERN_ERR "Failed registering ipic sys class\n");
return -ENODEV;
}
rc = sysdev_register(&device_ipic);
if (rc) {
printk(KERN_ERR "Failed registering ipic sys device\n");
return -ENODEV;
}
return 0;
}
subsys_initcall(init_ipic_sysfs);

arch/ppc/syslib/ipic.h (new file, 47 lines)

@ -0,0 +1,47 @@
/*
* IPIC private definitions and structure.
*
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2005 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __IPIC_H__
#define __IPIC_H__
#include <asm/ipic.h>
#define MPC83xx_IPIC_SIZE (0x00100)
/* System Global Interrupt Configuration Register */
#define SICFR_IPSA 0x00010000
#define SICFR_IPSD 0x00080000
#define SICFR_MPSA 0x00200000
#define SICFR_MPSB 0x00400000
/* System External Interrupt Mask Register */
#define SEMSR_SIRQ0 0x00008000
/* System Error Control Register */
#define SERCR_MCPR 0x00000001
struct ipic {
volatile u32 __iomem *regs;
unsigned int irq_offset;
};
struct ipic_info {
u8 pend; /* pending register offset from base */
u8 mask; /* mask register offset from base */
u8 prio; /* priority register offset from base */
u8 force; /* force register offset from base */
u8 bit; /* register bit position (as per doc)
bit mask = 1 << (31 - bit) */
u8 prio_mask; /* priority mask value */
};
#endif /* __IPIC_H__ */


@ -88,30 +88,31 @@ __copy_to_user_asm:
.globl __copy_in_user_asm
# %r2 = from, %r3 = n, %r4 = to
__copy_in_user_asm:
ahi %r3,-1
jo 6f
sacf 256
bras 1,1f
mvc 0(1,%r4),0(%r2)
0: mvc 0(256,%r4),0(%r2)
la %r2,256(%r2)
la %r4,256(%r4)
1: ahi %r3,-256
jnm 0b
2: ex %r3,0(%r1)
sacf 0
slr %r2,%r2
br 14
3: mvc 0(1,%r4),0(%r2)
bras %r1,4f
0: ahi %r3,257
1: mvc 0(1,%r4),0(%r2)
la %r2,1(%r2)
la %r4,1(%r4)
ahi %r3,-1
jnz 1b
2: lr %r2,%r3
br %r14
3: mvc 0(256,%r4),0(%r2)
la %r2,256(%r2)
la %r4,256(%r4)
4: ahi %r3,-256
jnm 3b
4: lr %r2,%r3
5: ex %r3,4(%r1)
sacf 0
6: slr %r2,%r2
br %r14
.section __ex_table,"a"
.long 0b,3b
.long 2b,3b
.long 3b,4b
.long 1b,2b
.long 3b,0b
.long 5b,0b
.previous
.align 4
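/*
 * Editor's note: the reworked __copy_in_user_asm above copies in 256-byte
 * mvc blocks first; if a block faults, the exception table sends it to the
 * byte-by-byte loop so the exact number of uncopied bytes can be returned in
 * %r2 (0 on full success).  Shape of the fast path in C, fault handling
 * omitted since it relies on the kernel fixup machinery:
 */
#include <stddef.h>
#include <string.h>

static size_t copy_blocks_then_tail(void *to, const void *from, size_t n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	while (n >= 256) {		/* the "mvc 0(256,%r4),0(%r2)" loop */
		memcpy(d, s, 256);
		d += 256; s += 256; n -= 256;
	}
	memcpy(d, s, n);		/* trailing "ex %r3,..." partial move */
	return 0;			/* nothing left uncopied */
}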


@ -88,30 +88,31 @@ __copy_to_user_asm:
.globl __copy_in_user_asm
# %r2 = from, %r3 = n, %r4 = to
__copy_in_user_asm:
aghi %r3,-1
jo 6f
sacf 256
bras 1,1f
mvc 0(1,%r4),0(%r2)
0: mvc 0(256,%r4),0(%r2)
la %r2,256(%r2)
la %r4,256(%r4)
1: aghi %r3,-256
jnm 0b
2: ex %r3,0(%r1)
sacf 0
slgr %r2,%r2
br 14
3: mvc 0(1,%r4),0(%r2)
bras %r1,4f
0: aghi %r3,257
1: mvc 0(1,%r4),0(%r2)
la %r2,1(%r2)
la %r4,1(%r4)
aghi %r3,-1
jnm 3b
4: lgr %r2,%r3
sacf 0
jnz 1b
2: lgr %r2,%r3
br %r14
3: mvc 0(256,%r4),0(%r2)
la %r2,256(%r2)
la %r4,256(%r4)
4: aghi %r3,-256
jnm 3b
5: ex %r3,4(%r1)
sacf 0
6: slgr %r2,%r2
br 14
.section __ex_table,"a"
.quad 0b,3b
.quad 2b,3b
.quad 3b,4b
.quad 1b,2b
.quad 3b,0b
.quad 5b,0b
.previous
.align 4


@ -219,6 +219,21 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user
return err;
}
int sparc_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
if (ARCH_SUN4C_SUN4 &&
(len > 0x20000000 ||
((flags & MAP_FIXED) &&
addr < 0xe0000000 && addr + len > 0x20000000)))
return -EINVAL;
/* See asm-sparc/uaccess.h */
if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
return -EINVAL;
return 0;
}
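/*
 * Editor's note: the range checks that used to live inside do_mmap2() are
 * hoisted into sparc_mmap_check() (and sparc64_mmap_check() further down) so
 * that other mapping paths can apply the same limits; the checks themselves
 * are unchanged apart from returning -EINVAL directly.
 */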
/* Linux version of mmap */
static unsigned long do_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
@ -233,25 +248,13 @@ static unsigned long do_mmap2(unsigned long addr, unsigned long len,
goto out;
}
retval = -EINVAL;
len = PAGE_ALIGN(len);
if (ARCH_SUN4C_SUN4 &&
(len > 0x20000000 ||
((flags & MAP_FIXED) &&
addr < 0xe0000000 && addr + len > 0x20000000)))
goto out_putf;
/* See asm-sparc/uaccess.h */
if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
goto out_putf;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
down_write(&current->mm->mmap_sem);
retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&current->mm->mmap_sem);
out_putf:
if (file)
fput(file);
out:

View File

@ -548,6 +548,26 @@ asmlinkage long sparc64_personality(unsigned long personality)
return ret;
}
int sparc64_mmap_check(unsigned long addr, unsigned long len,
unsigned long flags)
{
if (test_thread_flag(TIF_32BIT)) {
if (len >= STACK_TOP32)
return -EINVAL;
if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
return -EINVAL;
} else {
if (len >= VA_EXCLUDE_START)
return -EINVAL;
if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
return -EINVAL;
}
return 0;
}
/* Linux version of mmap */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
@ -563,27 +583,11 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
}
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
len = PAGE_ALIGN(len);
retval = -EINVAL;
if (test_thread_flag(TIF_32BIT)) {
if (len >= STACK_TOP32)
goto out_putf;
if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
goto out_putf;
} else {
if (len >= VA_EXCLUDE_START)
goto out_putf;
if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
goto out_putf;
}
down_write(&current->mm->mmap_sem);
retval = do_mmap(file, addr, len, prot, flags, off);
up_write(&current->mm->mmap_sem);
out_putf:
if (file)
fput(file);
out:

View File

@ -69,6 +69,8 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
} else
offset += PAGE_SIZE;
if (pte_write(entry))
entry = pte_mkdirty(entry);
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, address, pte, entry);

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.18-rc2
# Tue Jul 18 17:13:20 2006
# Linux kernel version: 2.6.18-rc4
# Thu Aug 24 21:05:55 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@ -201,7 +201,7 @@ CONFIG_ACPI_THERMAL=y
CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_ASUS is not set
# CONFIG_ACPI_IBM is not set
CONFIG_ACPI_TOSHIBA=y
# CONFIG_ACPI_TOSHIBA is not set
CONFIG_ACPI_BLACKLIST_YEAR=0
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_EC=y
@ -216,7 +216,7 @@ CONFIG_ACPI_CONTAINER=y
#
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TABLE=y
# CONFIG_CPU_FREQ_DEBUG is not set
CONFIG_CPU_FREQ_DEBUG=y
CONFIG_CPU_FREQ_STAT=y
# CONFIG_CPU_FREQ_STAT_DETAILS is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
@ -495,8 +495,9 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
CONFIG_BLK_DEV_SR=y
# CONFIG_BLK_DEV_SR_VENDOR is not set
CONFIG_CHR_DEV_SG=y
# CONFIG_CHR_DEV_SCH is not set
#
@ -512,7 +513,7 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
CONFIG_SCSI_SAS_ATTRS=y
#
# SCSI low-level drivers
@ -538,7 +539,7 @@ CONFIG_MEGARAID_MAILBOX=y
CONFIG_MEGARAID_SAS=y
CONFIG_SCSI_SATA=y
CONFIG_SCSI_SATA_AHCI=y
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_SATA_SVW=y
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
CONFIG_SCSI_SATA_NV=y
@ -589,7 +590,7 @@ CONFIG_BLK_DEV_DM=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
# CONFIG_FUSION_FC is not set
# CONFIG_FUSION_SAS is not set
CONFIG_FUSION_SAS=y
CONFIG_FUSION_MAX_SGE=128
# CONFIG_FUSION_CTL is not set
@ -675,7 +676,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
CONFIG_B44=y
CONFIG_FORCEDETH=y
# CONFIG_DGRS is not set
# CONFIG_EEPRO100 is not set
@ -712,7 +713,7 @@ CONFIG_E1000=y
# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
# CONFIG_BNX2 is not set
CONFIG_BNX2=y
#
# Ethernet (10000 Mbit)
@ -842,44 +843,7 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# Watchdog Cards
#
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
#
# Watchdog Device Drivers
#
CONFIG_SOFT_WATCHDOG=y
# CONFIG_ACQUIRE_WDT is not set
# CONFIG_ADVANTECH_WDT is not set
# CONFIG_ALIM1535_WDT is not set
# CONFIG_ALIM7101_WDT is not set
# CONFIG_SC520_WDT is not set
# CONFIG_EUROTECH_WDT is not set
# CONFIG_IB700_WDT is not set
# CONFIG_IBMASR is not set
# CONFIG_WAFER_WDT is not set
# CONFIG_I6300ESB_WDT is not set
# CONFIG_I8XX_TCO is not set
# CONFIG_SC1200_WDT is not set
# CONFIG_60XX_WDT is not set
# CONFIG_SBC8360_WDT is not set
# CONFIG_CPU5_WDT is not set
# CONFIG_W83627HF_WDT is not set
# CONFIG_W83877F_WDT is not set
# CONFIG_W83977F_WDT is not set
# CONFIG_MACHZ_WDT is not set
# CONFIG_SBC_EPX_C3_WATCHDOG is not set
#
# PCI-based Watchdog Cards
#
# CONFIG_PCIPCWATCHDOG is not set
# CONFIG_WDTPCI is not set
#
# USB-based Watchdog Cards
#
# CONFIG_USBPCWATCHDOG is not set
# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_INTEL=y
CONFIG_HW_RANDOM_AMD=y
@ -1056,6 +1020,7 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256
CONFIG_VIDEO_SELECT=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Sound
@ -1301,7 +1266,7 @@ CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS_FS is not set
CONFIG_AUTOFS4_FS=y
# CONFIG_FUSE_FS is not set
@ -1494,4 +1459,5 @@ CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_PLIST=y

View File

@ -73,39 +73,44 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the vsyscall DSO was being used.
*/
#define ELF_CORE_EXTRA_PHDRS (VSYSCALL32_EHDR->e_phnum)
#define ELF_CORE_EXTRA_PHDRS (find_vma(current->mm, VSYSCALL32_BASE) ? \
(VSYSCALL32_EHDR->e_phnum) : 0)
#define ELF_CORE_WRITE_EXTRA_PHDRS \
do { \
const struct elf32_phdr *const vsyscall_phdrs = \
(const struct elf32_phdr *) (VSYSCALL32_BASE \
+ VSYSCALL32_EHDR->e_phoff); \
int i; \
Elf32_Off ofs = 0; \
for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
struct elf32_phdr phdr = vsyscall_phdrs[i]; \
if (phdr.p_type == PT_LOAD) { \
BUG_ON(ofs != 0); \
ofs = phdr.p_offset = offset; \
phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
phdr.p_filesz = phdr.p_memsz; \
offset += phdr.p_filesz; \
if (find_vma(current->mm, VSYSCALL32_BASE)) { \
const struct elf32_phdr *const vsyscall_phdrs = \
(const struct elf32_phdr *) (VSYSCALL32_BASE \
+ VSYSCALL32_EHDR->e_phoff);\
int i; \
Elf32_Off ofs = 0; \
for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
struct elf32_phdr phdr = vsyscall_phdrs[i]; \
if (phdr.p_type == PT_LOAD) { \
BUG_ON(ofs != 0); \
ofs = phdr.p_offset = offset; \
phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
phdr.p_filesz = phdr.p_memsz; \
offset += phdr.p_filesz; \
} \
else \
phdr.p_offset += ofs; \
phdr.p_paddr = 0; /* match other core phdrs */ \
DUMP_WRITE(&phdr, sizeof(phdr)); \
} \
else \
phdr.p_offset += ofs; \
phdr.p_paddr = 0; /* match other core phdrs */ \
DUMP_WRITE(&phdr, sizeof(phdr)); \
} \
} while (0)
#define ELF_CORE_WRITE_EXTRA_DATA \
do { \
const struct elf32_phdr *const vsyscall_phdrs = \
(const struct elf32_phdr *) (VSYSCALL32_BASE \
+ VSYSCALL32_EHDR->e_phoff); \
int i; \
for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
if (vsyscall_phdrs[i].p_type == PT_LOAD) \
DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr, \
PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
if (find_vma(current->mm, VSYSCALL32_BASE)) { \
const struct elf32_phdr *const vsyscall_phdrs = \
(const struct elf32_phdr *) (VSYSCALL32_BASE \
+ VSYSCALL32_EHDR->e_phoff); \
int i; \
for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
if (vsyscall_phdrs[i].p_type == PT_LOAD) \
DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr,\
PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
} \
} \
} while (0)

View File

@ -71,7 +71,11 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
#endif
/* kernel code + 640k memory hole (later should not be needed, but
be paranoid for now) */
if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
if (last >= 640*1024 && addr < 1024*1024) {
*addrp = 1024*1024;
return 1;
}
if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
*addrp = __pa_symbol(&_end);
return 1;
}
@ -104,35 +108,6 @@ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
return 0;
}
/*
* This function checks if the entire range <start,end> is mapped with type.
*
* Note: this function only works correctly if the e820 table is sorted and
* not-overlapping, which is the case
*/
int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
{
int i;
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
if (type && ei->type != type)
continue;
/* is the region (part) in overlap with the current region ?*/
if (ei->addr >= end || ei->addr + ei->size <= start)
continue;
/* if the region is at the beginning of <start,end> we move
* start to the end of the region since it's ok until there
*/
if (ei->addr <= start)
start = ei->addr + ei->size;
/* if start is now at or beyond end, we're done, full coverage */
if (start >= end)
return 1; /* we're done */
}
return 0;
}
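
For reference, unlike e820_any_mapped() above, the helper removed here only
returns 1 when every byte of <start,end> is covered by entries of the given
type. A hedged sketch of the kind of check it was used for, mirroring the
MMCONFIG test dropped further down (`base' stands for
pci_mmcfg_config[0].base_address and is assumed to be an unsigned long):

	/* accept the MCFG aperture only if it is fully E820-reserved */
	if (!e820_all_mapped(base, base + MMCONFIG_APER_MIN, E820_RESERVED))
		printk(KERN_ERR "PCI: MCFG area at %lx is not E820-reserved\n",
		       base);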
/*
* Find a free area in a specific range.
*/

View File

@ -973,6 +973,8 @@ ENTRY(kernel_thread)
ENDPROC(kernel_thread)
child_rip:
pushq $0 # fake return address
CFI_STARTPROC
/*
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
@ -983,6 +985,7 @@ child_rip:
# exit
xorl %edi, %edi
call do_exit
CFI_ENDPROC
ENDPROC(child_rip)
/*

View File

@ -191,6 +191,7 @@ startup_64:
* jump
*/
movq initial_code(%rip),%rax
pushq $0 # fake return address
jmp *%rax
/* SMP bootup changes these two */

View File

@ -46,4 +46,9 @@ EXPORT_SYMBOL(init_task);
*/
DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
/* Copies of the original ist values from the tss are only accessed during
* debugging, no special alignment required.
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
#define ALIGN_TO_4K __attribute__((section(".data.init_task")))

View File

@ -521,8 +521,6 @@ static void discover_ebda(void)
void __init setup_arch(char **cmdline_p)
{
unsigned long kernel_end;
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
screen_info = SCREEN_INFO;
edid_info = EDID_INFO;
@ -596,8 +594,8 @@ void __init setup_arch(char **cmdline_p)
(table_end - table_start) << PAGE_SHIFT);
/* reserve kernel */
kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
reserve_bootmem_generic(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,

View File

@ -189,6 +189,7 @@ void __cpuinit cpu_init (void)
{
int cpu = stack_smp_processor_id();
struct tss_struct *t = &per_cpu(init_tss, cpu);
struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
unsigned long v;
char *estacks = NULL;
struct task_struct *me;
@ -256,7 +257,7 @@ void __cpuinit cpu_init (void)
estacks += EXCEPTION_STKSZ;
break;
}
t->ist[v] = (unsigned long)estacks;
orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
}
t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

View File

@ -107,7 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
}
static int kstack_depth_to_print = 12;
#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
#else
#define call_trace (-1)
#endif
#ifdef CONFIG_KALLSYMS
# include <linux/kallsyms.h>
@ -174,7 +178,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
break;
#endif
default:
end = per_cpu(init_tss, cpu).ist[k];
end = per_cpu(orig_ist, cpu).ist[k];
break;
}
/*
@ -274,21 +278,21 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
if (unwind_init_blocked(&info, tsk) == 0)
unw_ret = show_trace_unwind(&info, NULL);
}
if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
#ifdef CONFIG_STACK_UNWIND
unsigned long rip = info.regs.rip;
print_symbol("DWARF2 unwinder stuck at %s\n", rip);
if (call_trace == 1) {
printk("Leftover inexact backtrace:\n");
stack = (unsigned long *)info.regs.rsp;
} else if (call_trace > 1)
if (unw_ret > 0) {
if (call_trace == 1 && !arch_unw_user_mode(&info)) {
print_symbol("DWARF2 unwinder stuck at %s\n",
UNW_PC(&info));
if ((long)UNW_SP(&info) < 0) {
printk("Leftover inexact backtrace:\n");
stack = (unsigned long *)UNW_SP(&info);
} else
printk("Full inexact backtrace again:\n");
} else if (call_trace >= 1)
return;
else
printk("Full inexact backtrace again:\n");
#else
} else
printk("Inexact backtrace:\n");
#endif
}
}
/*
@ -1120,6 +1124,7 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
#ifdef CONFIG_STACK_UNWIND
static int __init call_trace_setup(char *s)
{
if (strcmp(s, "old") == 0)
@ -1133,3 +1138,4 @@ static int __init call_trace_setup(char *s)
return 1;
}
__setup("call_trace=", call_trace_setup);
#endif

View File

@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <asm/e820.h>
#include "pci.h"
@ -164,11 +165,33 @@ static __init void unreachable_devices(void)
}
}
static int disable_mcfg(struct dmi_system_id *d)
{
printk("PCI: %s detected. Disabling MCFG.\n", d->ident);
pci_probe &= ~PCI_PROBE_MMCONF;
return 0;
}
static struct dmi_system_id __initdata dmi_bad_mcfg[] = {
/* Has broken MCFG table that makes the system hang when used */
{
.callback = disable_mcfg,
.ident = "Intel D3C5105 SDV",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "Intel"),
DMI_MATCH(DMI_BOARD_NAME, "D26928"),
},
},
{}
};
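
Should another machine with a broken MCFG table turn up, it is a one-entry
addition to the table above, placed before the terminating {} sentinel. A
sketch with invented vendor and board strings:

	{
		.callback = disable_mcfg,
		.ident = "Example broken board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "EX1234"),
		},
	},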
void __init pci_mmcfg_init(void)
{
int i;
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
dmi_check_system(dmi_bad_mcfg);
if ((pci_probe & (PCI_PROBE_MMCONF|PCI_PROBE_MMCONF_FORCE)) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
@ -177,15 +200,6 @@ void __init pci_mmcfg_init(void)
(pci_mmcfg_config[0].base_address == 0))
return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
/* RED-PEN i386 doesn't do _nocache right now */
pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
if (pci_mmcfg_virt == NULL) {

View File

@ -212,7 +212,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
*/
case PTRACE_KILL:
ret = 0;
if (child->state == EXIT_ZOMBIE) /* already dead */
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
child->exit_code = SIGKILL;
child->ptrace &= ~PT_SINGLESTEP;

View File

@ -4177,6 +4177,11 @@ static int __init floppy_init(void)
int i, unit, drive;
int err, dr;
#if defined(CONFIG_PPC_MERGE)
if (check_legacy_ioport(FDC1))
return -ENODEV;
#endif
raw_cmd = NULL;
for (dr = 0; dr < N_DRIVE; dr++) {
@ -4234,13 +4239,6 @@ static int __init floppy_init(void)
}
use_virtual_dma = can_use_virtual_dma & 1;
#if defined(CONFIG_PPC_MERGE)
if (check_legacy_ioport(FDC1)) {
del_timer(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
#endif
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
del_timer(&fd_timeout);

View File

@ -175,6 +175,14 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
}
break;
case R200_EMIT_VAP_CTL:{
RING_LOCALS;
BEGIN_RING(2);
OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
ADVANCE_RING();
}
break;
case RADEON_EMIT_RB3D_COLORPITCH:
case RADEON_EMIT_RE_LINE_PATTERN:
case RADEON_EMIT_SE_LINE_WIDTH:
@ -202,7 +210,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
case R200_EMIT_TFACTOR_0:
case R200_EMIT_VTX_FMT_0:
case R200_EMIT_VAP_CTL:
case R200_EMIT_MATRIX_SELECT_0:
case R200_EMIT_TEX_PROC_CTL_2:
case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:

View File

@ -3428,6 +3428,7 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
if (rv) {
rv->user = NULL;
rv->done = free_recv_msg;
atomic_inc(&recv_msg_inuse_count);
}

View File

@ -391,8 +391,8 @@ static MGSL_PARAMS default_params = {
#define DESC_LIST_SIZE 4096
#define MASK_PARITY BIT1
#define MASK_FRAMING BIT2
#define MASK_BREAK BIT3
#define MASK_FRAMING BIT0
#define MASK_BREAK BIT14
#define MASK_OVERRUN BIT4
#define GSR 0x00 /* global status */
@ -1800,17 +1800,17 @@ static void rx_async(struct slgt_info *info)
stat = 0;
if ((status = *(p+1) & (BIT9 + BIT8))) {
if (status & BIT9)
if ((status = *(p+1) & (BIT1 + BIT0))) {
if (status & BIT1)
icount->parity++;
else if (status & BIT8)
else if (status & BIT0)
icount->frame++;
/* discard char if tty control flags say so */
if (status & info->ignore_status_mask)
continue;
if (status & BIT9)
if (status & BIT1)
stat = TTY_PARITY;
else if (status & BIT8)
else if (status & BIT0)
stat = TTY_FRAME;
}
if (tty) {

View File

@ -200,7 +200,7 @@ static int wd_margin = 0xB;
static int wd_multiplier = 2;
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(timeout, int, 27);
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27 (60s))");
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
@ -407,7 +407,7 @@ module_exit(sbc8360_exit);
MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>");
MODULE_DESCRIPTION("SBC8360 watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_VERSION("1.01");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of sbc8360.c */
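
For context on the module_param() fix above: the third argument is the
sysfs permission mask for /sys/module/<module>/parameters/<name>, not a
default value, so the literal 27 was a bogus mode. Conventional usage looks
roughly like this (values chosen for illustration only):

static int timeout = 27;		/* default index into the timeout table */
module_param(timeout, int, 0444);	/* visible in sysfs, not writable */
MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27)");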

View File

@ -376,6 +376,8 @@ static int proc_ide_read_media
break;
case ide_floppy:media = "floppy\n";
break;
case ide_optical:media = "optical\n";
break;
default: media = "UNKNOWN\n";
break;
}

View File

@ -367,12 +367,13 @@ sgiioc4_INB(unsigned long port)
static void __devinit
ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
{
void __iomem *virt_dma_base;
int num_ports = sizeof (ioc4_dma_regs_t);
printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
dma_base, dma_base + num_ports - 1);
if (!request_region(dma_base, num_ports, hwif->name)) {
if (!request_mem_region(dma_base, num_ports, hwif->name)) {
printk(KERN_ERR
"%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
"ALREADY in use\n",
@ -381,13 +382,21 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
goto dma_alloc_failure;
}
hwif->dma_base = dma_base;
virt_dma_base = ioremap(dma_base, num_ports);
if (virt_dma_base == NULL) {
printk(KERN_ERR
"%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
__FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1);
goto dma_remap_failure;
}
hwif->dma_base = (unsigned long) virt_dma_base;
hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
&hwif->dmatable_dma);
if (!hwif->dmatable_cpu)
goto dma_alloc_failure;
goto dma_pci_alloc_failure;
hwif->sg_max_nents = IOC4_PRD_ENTRIES;
@ -411,6 +420,12 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
printk(KERN_INFO
"Changing from DMA to PIO mode for Drive %s\n", hwif->name);
dma_pci_alloc_failure:
iounmap(virt_dma_base);
dma_remap_failure:
release_mem_region(dma_base, num_ports);
dma_alloc_failure:
/* Disable DMA because we could not allocate any DMA maps */
hwif->autodma = 0;
@ -607,18 +622,15 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq;
hwif->ide_dma_timeout = &__ide_dma_timeout;
/*
* The IOC4 uses MMIO rather than Port IO.
* It also needs special workarounds for INB.
*/
default_hwif_mmiops(hwif);
hwif->INB = &sgiioc4_INB;
}
static int __devinit
sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
{
unsigned long base, ctl, dma_base, irqport;
unsigned long cmd_base, dma_base, irqport;
unsigned long bar0, cmd_phys_base, ctl;
void __iomem *virt_base;
ide_hwif_t *hwif;
int h;
@ -636,23 +648,32 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
}
/* Get the CmdBlk and CtrlBlk Base Registers */
base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET;
ctl = pci_resource_start(dev, 0) + IOC4_CTRL_OFFSET;
irqport = pci_resource_start(dev, 0) + IOC4_INTR_OFFSET;
bar0 = pci_resource_start(dev, 0);
virt_base = ioremap(bar0, pci_resource_len(dev, 0));
if (virt_base == NULL) {
printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
d->name, bar0);
return -ENOMEM;
}
cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
if (!request_region(base, IOC4_CMD_CTL_BLK_SIZE, hwif->name)) {
cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
hwif->name)) {
printk(KERN_ERR
"%s : %s -- ERROR, Port Addresses "
"%s : %s -- ERROR, Addresses "
"0x%p to 0x%p ALREADY in use\n",
__FUNCTION__, hwif->name, (void *) base,
(void *) base + IOC4_CMD_CTL_BLK_SIZE);
__FUNCTION__, hwif->name, (void *) cmd_phys_base,
(void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
return -ENOMEM;
}
if (hwif->io_ports[IDE_DATA_OFFSET] != base) {
if (hwif->io_ports[IDE_DATA_OFFSET] != cmd_base) {
/* Initialize the IO registers */
sgiioc4_init_hwif_ports(&hwif->hw, base, ctl, irqport);
sgiioc4_init_hwif_ports(&hwif->hw, cmd_base, ctl, irqport);
memcpy(hwif->io_ports, hwif->hw.io_ports,
sizeof (hwif->io_ports));
hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
@ -665,6 +686,9 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
hwif->cds = (struct ide_pci_device_s *) d;
hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */
/* The IOC4 uses MMIO rather than Port IO. */
default_hwif_mmiops(hwif);
/* Initializing chipset IRQ Registers */
hwif->OUTL(0x03, irqport + IOC4_INTR_SET * 4);
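
The error unwinding added above follows the usual pairing of
request_mem_region()/ioremap() on the way in with iounmap()/
release_mem_region() on the way out. A standalone sketch of that pattern,
with an invented helper name:

static void __iomem *map_mmio(unsigned long phys, unsigned long len,
			      const char *name)
{
	void __iomem *virt;

	if (!request_mem_region(phys, len, name))
		return NULL;			/* region already claimed */
	virt = ioremap(phys, len);
	if (!virt)
		release_mem_region(phys, len);	/* undo the reservation */
	return virt;
}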

View File

@ -86,6 +86,8 @@ static const struct {
u8 chipset_family;
u8 flags;
} SiSHostChipInfo[] = {
{ "SiS968", PCI_DEVICE_ID_SI_968, ATA_133 },
{ "SiS966", PCI_DEVICE_ID_SI_966, ATA_133 },
{ "SiS965", PCI_DEVICE_ID_SI_965, ATA_133 },
{ "SiS745", PCI_DEVICE_ID_SI_745, ATA_100 },
{ "SiS735", PCI_DEVICE_ID_SI_735, ATA_100 },

View File

@ -6,7 +6,7 @@
*
* vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
* vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
* vt8235, vt8237
* vt8235, vt8237, vt8237a
*
* Copyright (c) 2000-2002 Vojtech Pavlik
*
@ -81,6 +81,7 @@ static struct via_isa_bridge {
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },

View File

@ -41,9 +41,11 @@
/* Trivial bitmap-based allocator */
u32 mthca_alloc(struct mthca_alloc *alloc)
{
unsigned long flags;
u32 obj;
spin_lock(&alloc->lock);
spin_lock_irqsave(&alloc->lock, flags);
obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
if (obj >= alloc->max) {
alloc->top = (alloc->top + alloc->max) & alloc->mask;
@ -56,19 +58,24 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
} else
obj = -1;
spin_unlock(&alloc->lock);
spin_unlock_irqrestore(&alloc->lock, flags);
return obj;
}
void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
unsigned long flags;
obj &= alloc->max - 1;
spin_lock(&alloc->lock);
spin_lock_irqsave(&alloc->lock, flags);
clear_bit(obj, alloc->table);
alloc->last = min(alloc->last, obj);
alloc->top = (alloc->top + alloc->max) & alloc->mask;
spin_unlock(&alloc->lock);
spin_unlock_irqrestore(&alloc->lock, flags);
}
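
The switch to the _irqsave variants above is the standard form for a lock
that may also be taken with interrupts disabled or from interrupt context,
where plain spin_lock() could self-deadlock. A generic sketch (the lock and
the critical section are placeholders):

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* work that may race with an interrupt handler taking the same lock */
	spin_unlock_irqrestore(&example_lock, flags);
}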
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,

View File

@ -18,17 +18,48 @@
static struct backlight_properties pmu_backlight_data;
static spinlock_t pmu_backlight_lock;
static int sleeping;
static u8 bl_curve[FB_BACKLIGHT_LEVELS];
static int pmu_backlight_get_level_brightness(struct fb_info *info,
int level)
static void pmu_backlight_init_curve(u8 off, u8 min, u8 max)
{
unsigned int i, flat, count, range = (max - min);
bl_curve[0] = off;
for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat)
bl_curve[flat] = min;
count = FB_BACKLIGHT_LEVELS * 15 / 16;
for (i = 0; i < count; ++i)
bl_curve[flat + i] = min + (range * (i + 1) / count);
}
static int pmu_backlight_curve_lookup(int value)
{
int level = (FB_BACKLIGHT_LEVELS - 1);
int i, max = 0;
/* Look for biggest value */
for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)
max = max((int)bl_curve[i], max);
/* Look for nearest value */
for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) {
int diff = abs(bl_curve[i] - value);
if (diff < max) {
max = diff;
level = i;
}
}
return level;
}
static int pmu_backlight_get_level_brightness(int level)
{
int pmulevel;
/* Get and convert the value */
mutex_lock(&info->bl_mutex);
pmulevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL;
mutex_unlock(&info->bl_mutex);
pmulevel = bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL;
if (pmulevel < 0)
pmulevel = 0;
else if (pmulevel > MAX_PMU_LEVEL)
@ -39,7 +70,6 @@ static int pmu_backlight_get_level_brightness(struct fb_info *info,
static int pmu_backlight_update_status(struct backlight_device *bd)
{
struct fb_info *info = class_get_devdata(&bd->class_dev);
struct adb_request req;
unsigned long flags;
int level = bd->props->brightness;
@ -55,7 +85,7 @@ static int pmu_backlight_update_status(struct backlight_device *bd)
level = 0;
if (level > 0) {
int pmulevel = pmu_backlight_get_level_brightness(info, level);
int pmulevel = pmu_backlight_get_level_brightness(level);
pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel);
pmu_wait_complete(&req);
@ -88,35 +118,19 @@ static struct backlight_properties pmu_backlight_data = {
};
#ifdef CONFIG_PM
static int pmu_backlight_sleep_call(struct pmu_sleep_notifier *self, int when)
void pmu_backlight_set_sleep(int sleep)
{
unsigned long flags;
spin_lock_irqsave(&pmu_backlight_lock, flags);
switch (when) {
case PBOOK_SLEEP_REQUEST:
sleeping = 1;
break;
case PBOOK_WAKE:
sleeping = 0;
break;
}
sleeping = sleep;
spin_unlock_irqrestore(&pmu_backlight_lock, flags);
return PBOOK_SLEEP_OK;
}
static struct pmu_sleep_notifier pmu_backlight_sleep_notif = {
.notifier_call = pmu_backlight_sleep_call,
};
#endif
#endif /* CONFIG_PM */
void __init pmu_backlight_init()
{
struct backlight_device *bd;
struct fb_info *info;
char name[10];
int level, autosave;
@ -131,27 +145,14 @@ void __init pmu_backlight_init()
!machine_is_compatible("PowerBook1,1"))
return;
/* Actually, this is a hack, but I don't know of a better way
* to get the first framebuffer device.
*/
info = registered_fb[0];
if (!info) {
printk("pmubl: No framebuffer found\n");
goto error;
}
snprintf(name, sizeof(name), "pmubl");
snprintf(name, sizeof(name), "pmubl%d", info->node);
bd = backlight_device_register(name, info, &pmu_backlight_data);
bd = backlight_device_register(name, NULL, &pmu_backlight_data);
if (IS_ERR(bd)) {
printk("pmubl: Backlight registration failed\n");
goto error;
}
mutex_lock(&info->bl_mutex);
info->bl_dev = bd;
fb_bl_default_curve(info, 0x7F, 0x46, 0x0E);
mutex_unlock(&info->bl_mutex);
pmu_backlight_init_curve(0x7F, 0x46, 0x0E);
level = pmu_backlight_data.max_brightness;
@ -161,28 +162,22 @@ void __init pmu_backlight_init()
pmu_request(&req, NULL, 2, 0xd9, 0);
pmu_wait_complete(&req);
mutex_lock(&info->bl_mutex);
level = pmac_backlight_curve_lookup(info,
level = pmu_backlight_curve_lookup(
(req.reply[0] >> 4) *
pmu_backlight_data.max_brightness / 15);
mutex_unlock(&info->bl_mutex);
}
up(&bd->sem);
down(&bd->sem);
bd->props->brightness = level;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
down(&bd->sem);
up(&bd->sem);
mutex_lock(&pmac_backlight_mutex);
if (!pmac_backlight)
pmac_backlight = bd;
mutex_unlock(&pmac_backlight_mutex);
#ifdef CONFIG_PM
pmu_register_sleep_notifier(&pmu_backlight_sleep_notif);
#endif
printk("pmubl: Backlight initialized (%s)\n", name);
return;

View File

@ -1995,6 +1995,8 @@ restore_via_state(void)
out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
}
extern void pmu_backlight_set_sleep(int sleep);
static int
pmac_suspend_devices(void)
{
@ -2032,6 +2034,11 @@ pmac_suspend_devices(void)
return -EBUSY;
}
#ifdef CONFIG_PMAC_BACKLIGHT
/* Tell backlight code not to muck around with the chip anymore */
pmu_backlight_set_sleep(1);
#endif
/* Call platform functions marked "on sleep" */
pmac_pfunc_i2c_suspend();
pmac_pfunc_base_suspend();
@ -2090,6 +2097,11 @@ pmac_wakeup_devices(void)
{
mdelay(100);
#ifdef CONFIG_PMAC_BACKLIGHT
/* Tell backlight code it can use the chip again */
pmu_backlight_set_sleep(0);
#endif
/* Power back up system devices (including the PIC) */
device_power_up();

View File

@ -930,10 +930,13 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
conf->working_disks);
for (i = 0; i < conf->raid_disks; i++)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
seq_printf(seq, "%s",
conf->mirrors[i].rdev &&
test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
seq_printf(seq, "]");
}
@ -975,7 +978,6 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
static void print_conf(conf_t *conf)
{
int i;
mirror_info_t *tmp;
printk("RAID1 conf printout:\n");
if (!conf) {
@ -985,14 +987,17 @@ static void print_conf(conf_t *conf)
printk(" --- wd:%d rd:%d\n", conf->working_disks,
conf->raid_disks);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
printk(" disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b));
}
rcu_read_unlock();
}
static void close_sync(conf_t *conf)
@ -1008,20 +1013,20 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
mirror_info_t *tmp;
/*
* Find all failed disks within the RAID1 configuration
* and mark them readable
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
*/
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i;
if (tmp->rdev
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_bit(In_sync, &tmp->rdev->flags)) {
mdk_rdev_t *rdev = conf->mirrors[i].rdev;
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)) {
conf->working_disks++;
mddev->degraded--;
set_bit(In_sync, &tmp->rdev->flags);
set_bit(In_sync, &rdev->flags);
}
}
@ -1237,7 +1242,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
/* ouch - failed to read all of that.
* Try some synchronous reads of other devices to get
* good data, much like with normal read errors. Only
* read into the pages we already have so they we don't
* read into the pages we already have so we don't
* need to re-issue the read request.
* We don't need to freeze the array, because being in an
* active sync request, there is no normal IO, and
@ -1257,6 +1262,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
s = PAGE_SIZE >> 9;
do {
if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
/* No rcu protection needed here; devices
* can only be removed when no resync is
* active, and resync is currently active
*/
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev->bdev,
sect + rdev->data_offset,
@ -1463,6 +1472,11 @@ static void raid1d(mddev_t *mddev)
s = PAGE_SIZE >> 9;
do {
/* Note: no rcu protection needed here
* as this is synchronous in the raid1d thread
* which is the thread that might remove
* a device. If raid1d ever becomes multi-threaded....
*/
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
@ -1486,7 +1500,6 @@ static void raid1d(mddev_t *mddev)
d = conf->raid_disks;
d--;
rdev = conf->mirrors[d].rdev;
atomic_add(s, &rdev->corrected_errors);
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
if (sync_page_io(rdev->bdev,
@ -1509,9 +1522,11 @@ static void raid1d(mddev_t *mddev)
s<<9, conf->tmppage, READ) == 0)
/* Well, this device is dead */
md_error(mddev, rdev);
else
else {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO "raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s, (unsigned long long)(sect + rdev->data_offset), bdevname(rdev->bdev, b));
}
}
}
} else {
@ -1787,19 +1802,17 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (i=0; i<conf->raid_disks; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
}
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
nr_sectors);
md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
return nr_sectors;
}
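
The rcu_read_lock()/rcu_dereference() changes above all follow the same
reader-side pattern: the rdev pointer may be cleared by a writer at any
time, but the object cannot be freed while the read-side section is held.
A minimal sketch (do_something() is a placeholder):

	rcu_read_lock();
	rdev = rcu_dereference(conf->mirrors[i].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags))
		do_something(rdev);
	rcu_read_unlock();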

View File

@ -2393,7 +2393,7 @@ config MYRI10GE
you will need a newer firmware image.
You may get this image or more information, at:
<http://www.myri.com/Myri-10G/>
<http://www.myri.com/scs/download-Myri10GE.html>
To compile this driver as a module, choose M here and read
<file:Documentation/networking/net-modules.txt>. The module

View File

@ -173,8 +173,11 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = 3;
static int eeprom_bad_csum_allow = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
#define DPRINTK(nlevel, klevel, fmt, args...) \
(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
@ -756,7 +759,8 @@ static int e100_eeprom_load(struct nic *nic)
checksum = le16_to_cpu(0xBABA - checksum);
if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
return -EAGAIN;
if (!eeprom_bad_csum_allow)
return -EAGAIN;
}
return 0;

View File

@ -1566,20 +1566,21 @@ static int __exit sunlance_sun4_remove(void)
static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
struct sbus_dev *sdev = to_sbus_device(&dev->dev);
struct device_node *dp = dev->node;
int err;
if (!strcmp(dp->name, "le")) {
if (sdev->parent) {
struct of_device *parent = &sdev->parent->ofdev;
if (!strcmp(parent->node->name, "ledma")) {
struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
err = sparc_lance_probe_one(sdev, ledma, NULL);
} else if (!strcmp(parent->node->name, "lebuffer")) {
err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
} else
err = sparc_lance_probe_one(sdev, NULL, NULL);
} else
err = sparc_lance_probe_one(sdev, NULL, NULL);
} else if (!strcmp(dp->name, "ledma")) {
struct sbus_dma *ledma = find_ledma(sdev);
err = sparc_lance_probe_one(sdev->child, ledma, NULL);
} else {
BUG_ON(strcmp(dp->name, "lebuffer"));
err = sparc_lance_probe_one(sdev->child, NULL, sdev);
}
return err;
}
@ -1604,12 +1605,6 @@ static struct of_device_id sunlance_sbus_match[] = {
{
.name = "le",
},
{
.name = "ledma",
},
{
.name = "lebuffer",
},
{},
};

View File

@ -467,6 +467,7 @@ static int arp_query(unsigned char *haddr, u32 paddr,
struct net_device *dev)
{
struct neighbour *neighbor_entry;
int ret = 0;
neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
@ -474,10 +475,11 @@ static int arp_query(unsigned char *haddr, u32 paddr,
neighbor_entry->used = jiffies;
if (neighbor_entry->nud_state & NUD_VALID) {
memcpy(haddr, neighbor_entry->ha, dev->addr_len);
return 1;
ret = 1;
}
neigh_release(neighbor_entry);
}
return 0;
return ret;
}
static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,

View File

@ -667,6 +667,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_vi
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);

View File

@ -52,7 +52,7 @@ static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static int dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
@ -60,6 +60,7 @@ static void do_kick_device(void *data);
* SECTION: Operations on the device structure.
*/
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
/*
* Allocate memory for a new device structure.
@ -121,7 +122,7 @@ dasd_free_device(struct dasd_device *device)
/*
* Make a new device known to the system.
*/
static inline int
static int
dasd_state_new_to_known(struct dasd_device *device)
{
int rc;
@ -145,7 +146,7 @@ dasd_state_new_to_known(struct dasd_device *device)
/*
* Let the system forget about a device.
*/
static inline void
static int
dasd_state_known_to_new(struct dasd_device * device)
{
/* Disable extended error reporting for this device. */
@ -163,12 +164,13 @@ dasd_state_known_to_new(struct dasd_device * device)
/* Give up reference we took in dasd_state_new_to_known. */
dasd_put_device(device);
return 0;
}
/*
* Request the irq line for the device.
*/
static inline int
static int
dasd_state_known_to_basic(struct dasd_device * device)
{
int rc;
@ -192,17 +194,23 @@ dasd_state_known_to_basic(struct dasd_device * device)
/*
* Release the irq line for the device. Terminate any running i/o.
*/
static inline void
static int
dasd_state_basic_to_known(struct dasd_device * device)
{
int rc;
dasd_gendisk_free(device);
dasd_flush_ccw_queue(device, 1);
rc = dasd_flush_ccw_queue(device, 1);
if (rc)
return rc;
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
debug_unregister(device->debug_area);
device->debug_area = NULL;
}
device->state = DASD_STATE_KNOWN;
return 0;
}
/*
@ -219,7 +227,7 @@ dasd_state_basic_to_known(struct dasd_device * device)
* In case the analysis returns an error, the device setup is stopped
* (a fake disk was already added to allow formatting).
*/
static inline int
static int
dasd_state_basic_to_ready(struct dasd_device * device)
{
int rc;
@ -247,25 +255,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
* Forget format information. Check if the target level is basic
* and if it is create fake disk for formatting.
*/
static inline void
static int
dasd_state_ready_to_basic(struct dasd_device * device)
{
dasd_flush_ccw_queue(device, 0);
int rc;
rc = dasd_flush_ccw_queue(device, 0);
if (rc)
return rc;
dasd_destroy_partitions(device);
dasd_flush_request_queue(device);
device->blocks = 0;
device->bp_block = 0;
device->s2b_shift = 0;
device->state = DASD_STATE_BASIC;
return 0;
}
/*
* Back to basic.
*/
static inline void
static int
dasd_state_unfmt_to_basic(struct dasd_device * device)
{
device->state = DASD_STATE_BASIC;
return 0;
}
/*
@ -273,7 +287,7 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
* the requeueing of requests from the linux request queue to the
* ccw queue.
*/
static inline int
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
device->state = DASD_STATE_ONLINE;
@ -284,16 +298,17 @@ dasd_state_ready_to_online(struct dasd_device * device)
/*
* Stop the requeueing of requests again.
*/
static inline void
static int
dasd_state_online_to_ready(struct dasd_device * device)
{
device->state = DASD_STATE_READY;
return 0;
}
/*
* Device startup state changes.
*/
static inline int
static int
dasd_increase_state(struct dasd_device *device)
{
int rc;
@ -329,30 +344,37 @@ dasd_increase_state(struct dasd_device *device)
/*
* Device shutdown state changes.
*/
static inline int
static int
dasd_decrease_state(struct dasd_device *device)
{
int rc;
rc = 0;
if (device->state == DASD_STATE_ONLINE &&
device->target <= DASD_STATE_READY)
dasd_state_online_to_ready(device);
rc = dasd_state_online_to_ready(device);
if (device->state == DASD_STATE_READY &&
if (!rc &&
device->state == DASD_STATE_READY &&
device->target <= DASD_STATE_BASIC)
dasd_state_ready_to_basic(device);
rc = dasd_state_ready_to_basic(device);
if (device->state == DASD_STATE_UNFMT &&
if (!rc &&
device->state == DASD_STATE_UNFMT &&
device->target <= DASD_STATE_BASIC)
dasd_state_unfmt_to_basic(device);
rc = dasd_state_unfmt_to_basic(device);
if (device->state == DASD_STATE_BASIC &&
if (!rc &&
device->state == DASD_STATE_BASIC &&
device->target <= DASD_STATE_KNOWN)
dasd_state_basic_to_known(device);
rc = dasd_state_basic_to_known(device);
if (device->state == DASD_STATE_KNOWN &&
if (!rc &&
device->state == DASD_STATE_KNOWN &&
device->target <= DASD_STATE_NEW)
dasd_state_known_to_new(device);
rc = dasd_state_known_to_new(device);
return 0;
return rc;
}
/*
@ -701,6 +723,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
cqr->retries--;
cqr->status = DASD_CQR_CLEAR;
cqr->stopclk = get_clock();
cqr->starttime = 0;
DBF_DEV_EVENT(DBF_DEBUG, device,
"terminate cqr %p successful",
cqr);
@ -978,6 +1001,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_QUEUED;
dasd_clear_timer(device);
wake_up(&dasd_flush_wq);
dasd_schedule_bh(device);
return;
}
@ -1241,6 +1265,10 @@ __dasd_check_expire(struct dasd_device * device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
DEV_MESSAGE(KERN_ERR, device,
"internal error - timeout (%is) expired "
"for cqr %p (%i retries left)",
(cqr->expires/HZ), cqr, cqr->retries);
if (device->discipline->term_IO(cqr) != 0)
/* Hmpf, try again in 1/10 sec */
dasd_set_timer(device, 10);
@ -1285,46 +1313,100 @@ __dasd_start_head(struct dasd_device * device)
dasd_set_timer(device, 50);
}
static inline int
_wait_for_clear(struct dasd_ccw_req *cqr)
{
return (cqr->status == DASD_CQR_QUEUED);
}
/*
* Remove requests from the ccw queue.
* Remove all requests from the ccw queue (all = '1') or only block device
* requests in case all = '0'.
* Take care of the erp-chain (chained via cqr->refers) and remove either
* the whole erp-chain or none of the erp-requests.
* If a request is currently running, term_IO is called and the request
* is re-queued. Prior to removing the terminated request we need to wait
* for the clear-interrupt.
* In case termination is not possible we stop processing and just finish
* the already moved requests.
*/
static void
static int
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
struct dasd_ccw_req *cqr, *orig, *n;
int rc, i;
struct list_head flush_queue;
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
INIT_LIST_HEAD(&flush_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev));
list_for_each_safe(l, n, &device->ccw_queue) {
cqr = list_entry(l, struct dasd_ccw_req, list);
rc = 0;
restart:
list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
/* get original request of erp request-chain */
for (orig = cqr; orig->refers != NULL; orig = orig->refers);
/* Flush all request or only block device requests? */
if (all == 0 && cqr->callback == dasd_end_request_cb)
if (all == 0 && cqr->callback != dasd_end_request_cb &&
orig->callback != dasd_end_request_cb) {
continue;
if (cqr->status == DASD_CQR_IN_IO)
device->discipline->term_IO(cqr);
if (cqr->status != DASD_CQR_DONE ||
cqr->status != DASD_CQR_FAILED) {
cqr->status = DASD_CQR_FAILED;
}
/* Check status and move request to flush_queue */
switch (cqr->status) {
case DASD_CQR_IN_IO:
rc = device->discipline->term_IO(cqr);
if (rc) {
/* unable to terminate request */
DEV_MESSAGE(KERN_ERR, device,
"dasd flush ccw_queue is unable "
" to terminate request %p",
cqr);
/* stop flush processing */
goto finished;
}
break;
case DASD_CQR_QUEUED:
case DASD_CQR_ERROR:
/* set request to FAILED */
cqr->stopclk = get_clock();
cqr->status = DASD_CQR_FAILED;
break;
default: /* do not touch the others */
break;
}
/* Rechain request (including erp chain) */
for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
cqr->endclk = get_clock();
list_move_tail(&cqr->list, &flush_queue);
}
if (i > 1)
/* moved more than one request - need to restart */
goto restart;
}
finished:
spin_unlock_irq(get_ccwdev_lock(device->cdev));
/* Now call the callback function of flushed requests */
restart_cb:
list_for_each_entry_safe(cqr, n, &flush_queue, list) {
if (cqr->status == DASD_CQR_CLEAR) {
/* wait for clear interrupt! */
wait_event(dasd_flush_wq, _wait_for_clear(cqr));
cqr->status = DASD_CQR_FAILED;
}
/* Process finished ERP request. */
if (cqr->refers) {
__dasd_process_erp(device, cqr);
continue;
/* restart list_for_xx loop since dasd_process_erp
* might remove multiple elements */
goto restart_cb;
}
/* Rechain request on device request queue */
/* call the callback function */
cqr->endclk = get_clock();
list_move_tail(&cqr->list, &flush_queue);
}
spin_unlock_irq(get_ccwdev_lock(device->cdev));
/* Now call the callback function of flushed requests */
list_for_each_safe(l, n, &flush_queue) {
cqr = list_entry(l, struct dasd_ccw_req, list);
if (cqr->callback != NULL)
(cqr->callback)(cqr, cqr->callback_data);
}
return rc;
}
/*
@ -1510,10 +1592,8 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
if (device->discipline->term_IO) {
cqr->retries = -1;
device->discipline->term_IO(cqr);
/*nished =
* wait (non-interruptible) for final status
* because signal ist still pending
*/
/* wait (non-interruptible) for final status
* because signal is still pending */
spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(wait_q, _wait_for_wakeup(cqr));
spin_lock_irq(get_ccwdev_lock(device->cdev));
@ -1546,19 +1626,11 @@ static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
if (list_empty(&device->ccw_queue))
return 0;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
rc = device->discipline->term_IO(cqr);
if (rc == 0) {
/* termination successful */
cqr->status = DASD_CQR_QUEUED;
cqr->startclk = cqr->stopclk = 0;
cqr->starttime = 0;
}
return rc;
return device->discipline->term_IO(cqr);
}
int
@ -1726,10 +1798,7 @@ dasd_flush_request_queue(struct dasd_device * device)
return;
spin_lock_irq(&device->request_queue_lock);
while (!list_empty(&device->request_queue->queue_head)) {
req = elv_next_request(device->request_queue);
if (req == NULL)
break;
while ((req = elv_next_request(device->request_queue))) {
blkdev_dequeue_request(req);
dasd_end_request(req, 0);
}
@ -2091,6 +2160,7 @@ dasd_init(void)
int rc;
init_waitqueue_head(&dasd_init_waitq);
init_waitqueue_head(&dasd_flush_wq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));

View File

@ -83,10 +83,12 @@ dasd_gendisk_alloc(struct dasd_device *device)
void
dasd_gendisk_free(struct dasd_device *device)
{
del_gendisk(device->gdp);
device->gdp->queue = NULL;
put_disk(device->gdp);
device->gdp = NULL;
if (device->gdp) {
del_gendisk(device->gdp);
device->gdp->queue = NULL;
put_disk(device->gdp);
device->gdp = NULL;
}
}
/*

Some files were not shown because too many files have changed in this diff.