commit c663600584
Booting a 3.2, 3.3, or 3.4-rc4 kernel on an Atari using the `nfeth' ethernet device triggers a WARN_ONCE() in generic irq handling code on the first irq for that device:

WARNING: at kernel/irq/handle.c:146 handle_irq_event_percpu+0x134/0x142()
irq 3 handler nfeth_interrupt+0x0/0x194 enabled interrupts
Modules linked in:
Call Trace:
[<000299b2>] warn_slowpath_common+0x48/0x6a
[<000299c0>] warn_slowpath_common+0x56/0x6a
[<00029a4c>] warn_slowpath_fmt+0x2a/0x32
[<0005b34c>] handle_irq_event_percpu+0x134/0x142
[<0005b34c>] handle_irq_event_percpu+0x134/0x142
[<0000a584>] nfeth_interrupt+0x0/0x194
[<001ba0a8>] schedule_preempt_disabled+0x0/0xc
[<0005b37a>] handle_irq_event+0x20/0x2c
[<0005add4>] generic_handle_irq+0x2c/0x3a
[<00002ab6>] do_IRQ+0x20/0x32
[<0000289e>] auto_irqhandler_fixup+0x4/0x6
[<00003144>] cpu_idle+0x22/0x2e
[<001b8a78>] printk+0x0/0x18
[<0024d112>] start_kernel+0x37a/0x386
[<0003021d>] __do_proc_dointvec+0xb1/0x366
[<0003021d>] __do_proc_dointvec+0xb1/0x366
[<0024c31e>] _sinittext+0x31e/0x9c0

After invoking the irq's handler the kernel sees !irqs_disabled() and concludes that the handler erroneously enabled interrupts. However, debugging shows that !irqs_disabled() is true even before the handler is invoked, which indicates a problem in the platform code rather than the specific driver. The warning does not occur in 3.1 or older kernels.

It turns out that the ALLOWINT definition for Atari is incorrect. The Atari definition of ALLOWINT is ~0x400; the stated purpose of that is to avoid taking HSYNC interrupts. irqs_disabled() returns true if the 3-bit ipl & 4 is non-zero. The nfeth interrupt runs at ipl 3 (it's autovector 3), but 3 & 4 is zero, so irqs_disabled() is false, and the warning above is generated.

When interrupts are explicitly disabled, ipl is set to 7. When they are enabled, ipl is masked with ALLOWINT. On Atari this will result in ipl = 3, which blocks interrupts at ipl 3 and below. So how come nfeth interrupts at ipl 3 are received at all? That's because ipl is reset to 2 by Atari-specific code in default_idle(), again with the stated purpose of blocking HSYNC interrupts. This discrepancy means that ipl 3 can remain blocked for longer than intended.

Both default_idle() and falcon_hblhandler() identify HSYNC with ipl 2, and the "Atari ST/.../F030 Hardware Register Listing" agrees, but ALLOWINT is defined as if HSYNC was ipl 3. [As an experiment I modified default_idle() to reset ipl to 3, and as expected that resulted in all nfeth interrupts being blocked.]

The fix is simple: define ALLOWINT as ~0x500 instead. This makes arch_local_irq_enable() consistent with default_idle(), and prevents the !irqs_disabled() problems for ipl 3 interrupts.

Tested on Atari running in an Aranym VM.

Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>
Tested-by: Michael Schmitz <schmitzmic@googlemail.com> (on Falcon/CT60)
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
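To make the ipl arithmetic above concrete, here is a minimal standalone C sketch (illustrative only; the helper names and the check are modeled on the commit message's description of irqs_disabled(), not copied from kernel headers):

#include <stdio.h>

/* SR bits 8-10 hold the 3-bit interrupt priority level (ipl). */
static unsigned int ipl_of(unsigned short sr) { return (sr >> 8) & 7; }

/* Per the commit message: irqs_disabled() is true iff ipl & 4 != 0. */
static int irqs_disabled(unsigned short sr) { return (ipl_of(sr) & 4) != 0; }

int main(void)
{
	/* nfeth is autovector 3, so its handler runs with ipl == 3 */
	printf("%d\n", irqs_disabled(0x2300)); /* 0: looks "enabled" -> WARN */
	printf("%d\n", irqs_disabled(0x2700)); /* 1: ipl 7, truly disabled */
	return 0;
}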
#ifndef __M68K_ENTRY_H
#define __M68K_ENTRY_H

#include <asm/setup.h>
#include <asm/page.h>
#ifdef __ASSEMBLY__
#include <asm/thread_info.h>
#endif

/*
 * Stack layout in 'ret_from_exception':
 *
 *	This allows access to the syscall arguments in registers d1-d5
 *
 *	 0(sp) - d1
 *	 4(sp) - d2
 *	 8(sp) - d3
 *	 C(sp) - d4
 *	10(sp) - d5
 *	14(sp) - a0
 *	18(sp) - a1
 *	1C(sp) - a2
 *	20(sp) - d0
 *	24(sp) - orig_d0
 *	28(sp) - stack adjustment
 *	2C(sp) - [ sr              ] [ format & vector ]
 *	2E(sp) - [ pc-hiword       ] [ sr              ]
 *	30(sp) - [ pc-loword       ] [ pc-hiword       ]
 *	32(sp) - [ format & vector ] [ pc-loword       ]
 *		  ^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^
 *			M68K		  COLDFIRE
 */
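/*
 * For illustration only: the layout above corresponds roughly to the C
 * view of the exception frame (cf. struct pt_regs in <asm/ptrace.h>).
 * This is a paraphrase added here for clarity, not the authoritative
 * definition:
 *
 *	struct pt_regs {
 *		long d1, d2, d3, d4, d5;
 *		long a0, a1, a2;
 *		long d0;
 *		long orig_d0;
 *		long stkadj;
 *	#ifdef CONFIG_COLDFIRE
 *		unsigned format :  4;	// frame format specifier
 *		unsigned vector : 12;	// vector offset
 *		unsigned short sr;
 *		unsigned long  pc;
 *	#else
 *		unsigned short sr;
 *		unsigned long  pc;
 *		unsigned format :  4;
 *		unsigned vector : 12;
 *	#endif
 *	};
 */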

/* the following macro is used when enabling interrupts */
#if defined(MACH_ATARI_ONLY)
	/* block out HSYNC = ipl 2 on the atari */
#define ALLOWINT	(~0x500)
#else
	/* portable version */
#define ALLOWINT	(~0x700)
#endif /* machine compilation types */
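/*
 * Worked example (added for clarity, not part of the original header):
 * the ipl field occupies SR bits 8-10, and enabling interrupts does
 * roughly sr &= ALLOWINT. On Atari, with ALLOWINT == ~0x500:
 *
 *	0x2700 & ~0x500 == 0x2200	// ipl 2: HSYNC (ipl 2) stays
 *					// blocked, ipl 3+ (e.g. nfeth)
 *					// is delivered
 *
 * The old mask of ~0x400 would have left ipl at 3, blocking ipl 3
 * interrupts such as nfeth's, as the commit message above explains.
 */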

#ifdef __ASSEMBLY__
/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs a3-a6 and d6-d7 are preserved by C code
 * the kernel doesn't mess with usp unless it needs to
 */
#define SWITCH_STACK_SIZE	(6*4+4)		/* includes return address */

#ifdef CONFIG_COLDFIRE
#ifdef CONFIG_COLDFIRE_SW_A7
/*
 * This is made a little more tricky on older ColdFires. There are no
 * separate supervisor and user stack pointers, so we need to artificially
 * construct a usp in software... When doing this we need to disable
 * interrupts, otherwise bad things will happen.
 */
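/*
 * In rough pseudocode (an illustrative sketch added for clarity, not
 * kernel code), the SAVE_ALL_SYS path below does:
 *
 *	disable_interrupts();		// move #0x2700,%sr
 *	if (exception_came_from_user(sp)) {
 *		sw_usp = sp + 8;	// user sp as it was before the
 *					// 8-byte exception frame was pushed
 *		sp = sw_ksp - 8;	// switch to kernel sp, re-make room
 *		push stkadj, orig_d0, d0, d1-d5/a0-a2;	// build pt_regs
 *		copy pc and format/vector/sr from the user stack into it;
 *	} else {
 *		push stkadj, orig_d0, d0, d1-d5/a0-a2;	// already on ksp
 *	}
 */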
.globl sw_usp
.globl sw_ksp

.macro SAVE_ALL_SYS
	move	#0x2700,%sr		/* disable intrs */
	btst	#5,%sp@(2)		/* from user? */
	bnes	6f			/* no, skip */
	movel	%sp,sw_usp		/* save user sp */
	addql	#8,sw_usp		/* remove exception */
	movel	sw_ksp,%sp		/* kernel sp */
	subql	#8,%sp			/* room for exception */
	clrl	%sp@-			/* stkadj */
	movel	%d0,%sp@-		/* orig d0 */
	movel	%d0,%sp@-		/* d0 */
	lea	%sp@(-32),%sp		/* space for 8 regs */
	moveml	%d1-%d5/%a0-%a2,%sp@
	movel	sw_usp,%a0		/* get usp */
	movel	%a0@-,%sp@(PT_OFF_PC)	/* copy exception program counter */
	movel	%a0@-,%sp@(PT_OFF_FORMATVEC) /* copy exception format/vector/sr */
	bra	7f
6:
	clrl	%sp@-			/* stkadj */
	movel	%d0,%sp@-		/* orig d0 */
	movel	%d0,%sp@-		/* d0 */
	lea	%sp@(-32),%sp		/* space for 8 regs */
	moveml	%d1-%d5/%a0-%a2,%sp@
7:
.endm

.macro SAVE_ALL_INT
	SAVE_ALL_SYS
	moveq	#-1,%d0			/* not system call entry */
	movel	%d0,%sp@(PT_OFF_ORIG_D0)
.endm

.macro RESTORE_USER
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stkadj */
	addql	#8,%sp			/* remove exception */
	movel	%sp,sw_ksp		/* save ksp */
	subql	#8,sw_usp		/* set exception */
	movel	sw_usp,%sp		/* restore usp */
	rte
.endm

.macro RDUSP
	movel	sw_usp,%a3
.endm

.macro WRUSP
	movel	%a3,sw_usp
.endm

#else /* !CONFIG_COLDFIRE_SW_A7 */
/*
 * Modern ColdFire parts have separate supervisor and user stack
 * pointers. Simple load and restore macros for this case.
 */
.macro SAVE_ALL_SYS
	move	#0x2700,%sr		/* disable intrs */
	clrl	%sp@-			/* stkadj */
	movel	%d0,%sp@-		/* orig d0 */
	movel	%d0,%sp@-		/* d0 */
	lea	%sp@(-32),%sp		/* space for 8 regs */
	moveml	%d1-%d5/%a0-%a2,%sp@
.endm

.macro SAVE_ALL_INT
	move	#0x2700,%sr		/* disable intrs */
	clrl	%sp@-			/* stkadj */
	pea	-1:w			/* orig d0 */
	movel	%d0,%sp@-		/* d0 */
	lea	%sp@(-32),%sp		/* space for 8 regs */
	moveml	%d1-%d5/%a0-%a2,%sp@
.endm

.macro RESTORE_USER
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stkadj */
	rte
.endm

.macro RDUSP
	/* move %usp,%a3 */
	.word	0x4e6b
.endm

.macro WRUSP
	/* move %a3,%usp */
	.word	0x4e63
.endm

#endif /* !CONFIG_COLDFIRE_SW_A7 */

.macro SAVE_SWITCH_STACK
	lea	%sp@(-24),%sp		/* 6 regs */
	moveml	%a3-%a6/%d6-%d7,%sp@
.endm

.macro RESTORE_SWITCH_STACK
	moveml	%sp@,%a3-%a6/%d6-%d7
	lea	%sp@(24),%sp		/* 6 regs */
.endm

#else /* !CONFIG_COLDFIRE */

/*
 * All other types of m68k parts (68000, 680x0, CPU32) have the same
 * entry and exit code.
 */

/*
 * a -1 in the orig_d0 field signifies
 * that the stack frame is NOT for syscall
 */
.macro SAVE_ALL_INT
	clrl	%sp@-		/* stk_adj */
	pea	-1:w		/* orig d0 */
	movel	%d0,%sp@-	/* d0 */
	moveml	%d1-%d5/%a0-%a2,%sp@-
.endm

.macro SAVE_ALL_SYS
	clrl	%sp@-		/* stk_adj */
	movel	%d0,%sp@-	/* orig d0 */
	movel	%d0,%sp@-	/* d0 */
	moveml	%d1-%d5/%a0-%a2,%sp@-
.endm

.macro RESTORE_ALL
	moveml	%sp@+,%a0-%a2/%d1-%d5
	movel	%sp@+,%d0
	addql	#4,%sp		/* orig d0 */
	addl	%sp@+,%sp	/* stk adj */
	rte
.endm

.macro SAVE_SWITCH_STACK
	moveml	%a3-%a6/%d6-%d7,%sp@-
.endm

.macro RESTORE_SWITCH_STACK
	moveml	%sp@+,%a3-%a6/%d6-%d7
.endm

#endif /* !CONFIG_COLDFIRE */

/*
 * Register %a2 is reserved and set to current task on MMU enabled systems.
 * Non-MMU systems do not reserve %a2 in this way, and this definition is
 * not used for them.
 */
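/*
 * Illustrative C equivalent of the get_current macro below (a sketch
 * added for clarity, assuming struct thread_info sits at the base of
 * the kernel stack with its task pointer as the first member):
 *
 *	struct thread_info *ti = (void *)(sp & -THREAD_SIZE);
 *	current = ti->task;	// first word of thread_info
 */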
#ifdef CONFIG_MMU

#define curptr a2

#define GET_CURRENT(tmp) get_current tmp
.macro get_current reg=%d0
	movel	%sp,\reg
	andl	#-THREAD_SIZE,\reg
	movel	\reg,%curptr
	movel	%curptr@,%curptr
.endm

#else

#define GET_CURRENT(tmp)

#endif /* CONFIG_MMU */

#else /* C source */

#define STR(X) STR1(X)
#define STR1(X) #X

#define SAVE_ALL_INT				\
	"clrl	%%sp@-;"	/* stk_adj */	\
	"pea	-1:w;"		/* orig d0 = -1 */ \
	"movel	%%d0,%%sp@-;"	/* d0 */	\
	"moveml	%%d1-%%d5/%%a0-%%a2,%%sp@-"

#define GET_CURRENT(tmp) \
	"movel	%%sp,"#tmp"\n\t" \
	"andw	#-"STR(THREAD_SIZE)","#tmp"\n\t" \
	"movel	"#tmp",%%a2\n\t" \
	"movel	%%a2@,%%a2"

#endif

#endif /* __M68K_ENTRY_H */