1d015cf02a
This patch reworks the sh3/sh4/sh4a register saving code in the following ways:

- break out prepare_stack_save_dsp() from handle_exception()
- break out save_regs() from handle_exception()
- the register saving order is unchanged
- align new functions to fit in cache lines
- separate exception code from interrupt code
- keep main code flow in a single cache line per exception vector
- use bsr/rts for regular functions (save pr first)
- keep data in one shared cache line (exception_data)
- document the functions
- tie in the hp6xx code

Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
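As a reading aid, here is a minimal sketch of the "save pr first, then bsr/rts" convention the log refers to. It is not code from this patch: the caller label, the .align value and the empty helper body are illustrative placeholders; only the save_regs name comes from the log above.

	! Sketch only: caller saves pr before bsr clobbers it, so the
	! broken-out helper can be reached with bsr and return with rts.

	.align	5			! e.g. pad to a cache-line boundary
caller:
	sts.l	pr, @-r15		! save pr before bsr overwrites it
	bsr	save_regs		! pr := return address, branch to helper
	 nop				! bsr delay slot
	lds.l	@r15+, pr		! restore the original pr
	rts				! return to caller's caller
	 nop

	.align	5
save_regs:
	! ... register saving body (order unchanged, per the log) ...
	rts				! return via the pr set up by bsr
	 nop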
44 lines
739 B
ArmAsm
/*
 * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

#include <linux/linkage.h>
#include <cpu/mmu_context.h>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 * For more details, please have a look at entry.S
 */

#define k0	r0
#define k1	r1

ENTRY(wakeup_start)
! clear STBY bit
	mov	#-126, k1
	and	#127, k0
	mov.b	k0, @k1
! enable refresh
	mov.l	5f, k1
	mov.w	6f, k0
	mov.w	k0, @k1
! jump to handler
	mov.l	4f, k1
	jmp	@k1
	 nop

	.align	2
4:	.long	handle_interrupt
5:	.long	0xffffff68
6:	.word	0x0524

ENTRY(wakeup_end)
	nop
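A side note on the constants in wakeup_start, not part of the file: SH mov immediates are 8 bits and sign-extended, which is why the STBY write needs no literal while the refresh setup loads its values from the literal pool. Annotated excerpt for reference; the register name is my reading of the SH7709 map, so treat it as an assumption:

	mov	#-126, k1	! 8-bit immediate sign-extends to 0xffffff82,
				! presumably STBCR ("clear STBY bit" above)
	mov.l	5f, k1		! 0xffffff68 is out of 8-bit immediate range,
				! so it is fetched from the literal at 5:
	mov.w	6f, k0		! likewise 0x0524 comes from the .word at 6: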