ARC: Code cosmetics (Nothing semantical)
* reduce editor lines taken by pt_regs
* ARCompact ISA specific part of TLB Miss handlers clubbed together
* cleanup some comments

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent fce16bc35a
commit 4b06ff35fb
@@ -20,27 +20,17 @@ struct pt_regs {
 
 	/* Real registers */
 	long bta;	/* bta_l1, bta_l2, erbta */
-	long lp_start;
-	long lp_end;
-	long lp_count;
+
+	long lp_start, lp_end, lp_count;
+
 	long status32;	/* status32_l1, status32_l2, erstatus */
 	long ret;	/* ilink1, ilink2 or eret */
 	long blink;
 	long fp;
 	long r26;	/* gp */
-	long r12;
-	long r11;
-	long r10;
-	long r9;
-	long r8;
-	long r7;
-	long r6;
-	long r5;
-	long r4;
-	long r3;
-	long r2;
-	long r1;
-	long r0;
+
+	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
 	long sp;	/* user/kernel sp depending on where we came from */
 	long orig_r0;

@@ -70,19 +60,7 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-	long r25;
-	long r24;
-	long r23;
-	long r22;
-	long r21;
-	long r20;
-	long r19;
-	long r18;
-	long r17;
-	long r16;
-	long r15;
-	long r14;
-	long r13;
+	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
 #define instruction_pointer(regs) ((regs)->ret)

@@ -20,9 +20,9 @@ typedef struct {
 #define __ARCH_SPIN_LOCK_LOCKED	{ __ARCH_SPIN_LOCK_LOCKED__ }
 
 /*
- * Unlocked: 0x01_00_00_00
- * Read lock(s): 0x00_FF_00_00 to say 0x01
- * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
  */
 typedef struct {
 	volatile unsigned int	counter;

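The rewritten comment spells out the rwlock counter encoding: readers decrement from 0x0100_0000, and a writer may take the counter to 0 only from the fully unlocked value. Below is a minimal, single-threaded C sketch of just that encoding; the names (RW_LOCK_UNLOCKED_VAL, read_trylock, ...) are invented for the sketch, and it is not the kernel's rwlock implementation, which performs the same transitions under proper atomicity/locking.

#include <stdbool.h>
#include <stdio.h>

/* Counter encoding from the comment above (illustration only):
 *   unlocked     -> 0x0100_0000
 *   N readers    -> 0x0100_0000 - N  (0x00FF_FFFF down to 0x01)
 *   write locked -> 0x0
 */
#define RW_LOCK_UNLOCKED_VAL	0x01000000u

static unsigned int counter = RW_LOCK_UNLOCKED_VAL;

static bool read_trylock(void)
{
	if (counter == 0)		/* a writer holds the lock */
		return false;
	counter--;			/* one more reader */
	return true;
}

static void read_unlock(void)
{
	counter++;			/* reader gone */
}

static bool write_trylock(void)
{
	if (counter != RW_LOCK_UNLOCKED_VAL)	/* readers or writer present */
		return false;
	counter = 0;				/* exclusive */
	return true;
}

static void write_unlock(void)
{
	counter = RW_LOCK_UNLOCKED_VAL;
}

int main(void)
{
	read_trylock();
	printf("one reader       : counter=%#x\n", counter);	/* 0xffffff */
	printf("write while read : %s\n", write_trylock() ? "ok" : "busy");
	read_unlock();
	if (write_trylock()) {
		printf("write when idle  : counter=%#x\n", counter);	/* 0 */
		write_unlock();
	}
	return 0;
}
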
@@ -622,12 +622,12 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
 /*
  * General purpose helper to make I and D cache lines consistent.
  * @paddr is phy addr of region
- * @vaddr is typically user or kernel vaddr (vmalloc)
- * Howver in one instance, flush_icache_range() by kprobe (for a breakpt in
+ * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
+ * However in one instance, when called by kprobe (for a breakpt in
  * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
  * use a paddr to index the cache (despite VIPT). This is fine since since a
- * built-in kernel page will not have any virtual mappings (not even kernel)
- * kprobe on loadable module is different as it will have kvaddr.
+ * builtin kernel page will not have any virtual mappings.
+ * kprobe on loadable module will be kernel vaddr.
  */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {

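To make the two cases in the updated comment concrete, here is a hedged C sketch of how callers differ only in what they pass as @vaddr. The stub, the addresses and the helper name are all made up so the fragment stands alone; it is not the real ARC implementation, which issues the CDU cache ops described above.

/* Stand-in stub so the sketch compiles; the real ARC routine does the
 * actual I/D cache line operations. */
static void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr,
				 int len)
{
	(void)paddr; (void)vaddr; (void)len;
}

static void example_callers(void)
{
	unsigned long insn_paddr = 0x80002000UL;	/* made-up physical addr */
	unsigned long uvaddr     = 0x00401000UL;	/* made-up user vaddr    */

	/* breakpoint in user code: distinct physical and user virtual addrs */
	__sync_icache_dcache(insn_paddr, uvaddr, 4);

	/* kprobe on builtin kernel code: no separate mapping exists, so the
	 * paddr is also passed as @vaddr and the CDU op is paddr-indexed */
	__sync_icache_dcache(insn_paddr, insn_paddr, 4);
}

int main(void)
{
	example_callers();
	return 0;
}
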
@@ -44,17 +44,36 @@
 #include <asm/arcregs.h>
 #include <asm/cache.h>
 #include <asm/processor.h>
 #if (CONFIG_ARC_MMU_VER == 1)
 #include <asm/tlb-mmu1.h>
 #endif
 
-;--------------------------------------------------------------------------
-; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
-; For details refer to comments before TLBMISS_FREEUP_REGS below
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+;	".size   ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+;	".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
 ;--------------------------------------------------------------------------
 
+; scratch memory to save [r0-r3] used to code TLB refill Handler
 ARCFP_DATA ex_saved_reg1
-	.align 1 << L1_CACHE_SHIFT	; IMP: Must be Cache Line aligned
+	.align 1 << L1_CACHE_SHIFT
 	.type	ex_saved_reg1, @object
 #ifdef CONFIG_SMP
 	.size	ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)

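The relocated comment explains the sizing of ex_saved_reg1: on UP it is a single 16-byte global, while on SMP each CPU gets its own cache-line-sized slot so the slots never bounce between cores. The small stand-alone C model below illustrates only that layout and the per-CPU offset math used by TLBMISS_FREEUP_REGS; L1_CACHE_SHIFT and NR_CPUS are assumed example values, and ex_saved_reg1_model / scratch_for_cpu are names invented for the sketch.

#include <stdio.h>

/* Assumed values for the sketch only; in the kernel both come from config. */
#define L1_CACHE_SHIFT	6	/* 64-byte L1 lines */
#define NR_CPUS		4

/* Mirrors ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)":
 * a full cache line per CPU even though only 3-4 words per CPU are stored,
 * so no two CPUs ever write the same line (no cache-line bouncing). */
static char ex_saved_reg1_model[NR_CPUS << L1_CACHE_SHIFT];

/* Mirrors the SMP address math in TLBMISS_FREEUP_REGS:
 *	GET_CPU_ID r0 ; lsl r0, r0, L1_CACHE_SHIFT ; add r0, @ex_saved_reg1, r0 */
static char *scratch_for_cpu(unsigned int cpu_id)
{
	return ex_saved_reg1_model + ((unsigned long)cpu_id << L1_CACHE_SHIFT);
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u scratch slot at byte offset %ld\n", cpu,
		       (long)(scratch_for_cpu(cpu) - ex_saved_reg1_model));
	return 0;
}
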
@@ -66,6 +85,44 @@ ex_saved_reg1:
 	.zero 16
 #endif
 
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
+	GET_CPU_ID  r0			; get to per cpu scratch mem,
+	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
+	add r0, @ex_saved_reg1, r0
+#else
+	st    r0, [@ex_saved_reg1]
+	mov_s r0, @ex_saved_reg1
+#endif
+	st_s  r1, [r0, 4]
+	st_s  r2, [r0, 8]
+	st_s  r3, [r0, 12]
+
+	; VERIFY if the ASID in MMU-PID Reg is same as
+	; one in Linux data structures
+
+	DBG_ASID_MISMATCH
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+	GET_CPU_ID  r0			; get to per cpu scratch mem
+	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
+	add r0, @ex_saved_reg1, r0
+	ld_s  r3, [r0,12]
+	ld_s  r2, [r0, 8]
+	ld_s  r1, [r0, 4]
+	lr  r0, [ARC_REG_SCRATCH_DATA0]
+#else
+	mov_s r0, @ex_saved_reg1
+	ld_s  r3, [r0,12]
+	ld_s  r2, [r0, 8]
+	ld_s  r1, [r0, 4]
+	ld_s  r0, [r0]
+#endif
+.endm
+
 ;============================================================================
 ;  Troubleshooting Stuff
 ;============================================================================

@@ -191,68 +248,6 @@ ex_saved_reg1:
 #endif
 .endm
 
-;-----------------------------------------------------------------
-; ARC700 Exception Handling doesn't auto-switch stack and it only provides
-; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
-;
-; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
-; "global" is used to free-up FIRST core reg to be able to code the rest of
-; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
-; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
-; need to be saved as well by extending the "global" to be 4 words. Hence
-;	".size   ex_saved_reg1, 16"
-; [All of this dance is to avoid stack switching for each TLB Miss, since we
-; only need to save only a handful of regs, as opposed to complete reg file]
-;
-; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
-; core reg as it will not be SMP safe.
-; Thus scratch AUX reg is used (and no longer used to cache task PGD).
-; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
-; Epilogue thus has to locate the "per-cpu" storage for regs.
-; To avoid cache line bouncing the per-cpu global is aligned/sized per
-; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
-;	".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
-
-; As simple as that....
-
-.macro TLBMISS_FREEUP_REGS
-#ifdef CONFIG_SMP
-	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
-	GET_CPU_ID  r0			; get to per cpu scratch mem,
-	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
-	add r0, @ex_saved_reg1, r0
-#else
-	st    r0, [@ex_saved_reg1]
-	mov_s r0, @ex_saved_reg1
-#endif
-	st_s  r1, [r0, 4]
-	st_s  r2, [r0, 8]
-	st_s  r3, [r0, 12]
-
-	; VERIFY if the ASID in MMU-PID Reg is same as
-	; one in Linux data structures
-
-	DBG_ASID_MISMATCH
-.endm
-
-;-----------------------------------------------------------------
-.macro TLBMISS_RESTORE_REGS
-#ifdef CONFIG_SMP
-	GET_CPU_ID  r0			; get to per cpu scratch mem
-	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
-	add r0, @ex_saved_reg1, r0
-	ld_s  r3, [r0,12]
-	ld_s  r2, [r0, 8]
-	ld_s  r1, [r0, 4]
-	lr  r0, [ARC_REG_SCRATCH_DATA0]
-#else
-	mov_s r0, @ex_saved_reg1
-	ld_s  r3, [r0,12]
-	ld_s  r2, [r0, 8]
-	ld_s  r1, [r0, 4]
-	ld_s  r0, [r0]
-#endif
-.endm
-
 ARCFP_CODE	;Fast Path Code, candidate for ICCM
 