247055aa21
This patch removes the domain switching functionality via the set_fs and __switch_to functions on cores that have a TLS register.

Currently, the ioremap and vmalloc areas share the same level 1 page tables and therefore have the same domain (DOMAIN_KERNEL). When the kernel domain is modified from Client to Manager (via __set_fs or in the __switch_to function), the XN (eXecute Never) bit is overridden and newer CPUs can speculatively prefetch the ioremap'ed memory.

Linux performs the kernel domain switching to allow user-specific functions (copy_to/from_user, get/put_user etc.) to access kernel memory. In order for these functions to work with the kernel domain set to Client, the patch changes the LDRT/STRT and related instructions to the plain LDR/STR ones. The user pages' access rights are also changed to kernel read-only rather than read/write so that the copy-on-write mechanism still works.

CPU_USE_DOMAINS gets disabled only if the hardware has a TLS register (CPU_32v6K is defined), since without one writing the TLS value to the high vectors page isn't possible. The user addresses passed to the kernel are checked by the access_ok() function so that they do not point into the kernel space.

Tested-by: Anton Vorontsov <cbouatmailru@gmail.com>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
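As a rough illustration of the LDRT/STRT change described above, the T() helper used in getuser.S below could be defined along these lines (a sketch modelled on <asm/domain.h>; the exact macro body in the tree may differ):

/*
 * Sketch: with CONFIG_CPU_USE_DOMAINS the unprivileged ("user") form of
 * the instruction is emitted (ldrb -> ldrbt, ldr -> ldrt); without it,
 * the ordinary privileged form is used, which is what this patch relies on
 * once the kernel domain stays in Client mode.
 */
#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr)	instr ## t
#else
#define T(instr)	instr
#endif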
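Callers of the __get_user/__put_user helpers below are expected to have validated the pointer with access_ok() first, as the commit message notes. Conceptually the check amounts to the following (a simplified C sketch with a hypothetical name; the real ARM implementation uses a short inline-assembly sequence against the thread's addr_limit):

/*
 * Simplified sketch: the access is allowed only when the whole
 * [addr, addr + size) range lies below addr_limit (TASK_SIZE for a
 * normal user process), so a user pointer can never reach kernel space.
 */
static inline int access_ok_sketch(unsigned long addr, unsigned long size,
				   unsigned long addr_limit)
{
	return size <= addr_limit && addr <= addr_limit - size;
}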
/*
 *  linux/arch/arm/lib/getuser.S
 *
 *  Copyright (C) 2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Idea from x86 version, (C) Copyright 1998 Linus Torvalds
 *
 * These functions have a non-standard call interface to make them more
 * efficient, especially as they return an error value in addition to
 * the "real" return value.
 *
 * __get_user_X
 *
 * Inputs:	r0 contains the address
 * Outputs:	r0 is the error code
 *		r2, r3 contains the zero-extended value
 *		lr corrupted
 *
 * No other registers must be altered.  (see <asm/uaccess.h>
 * for specific ASM register usage).
 *
 * Note that ADDR_LIMIT is either 0 or 0xc0000000.
 * Note also that it is intended that __get_user_bad is not global.
 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/domain.h>

ENTRY(__get_user_1)
1:	T(ldrb)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
ENDPROC(__get_user_1)

ENTRY(__get_user_2)
#ifdef CONFIG_THUMB2_KERNEL
2:	T(ldrb)	r2, [r0]
3:	T(ldrb)	r3, [r0, #1]
#else
2:	T(ldrb)	r2, [r0], #1
3:	T(ldrb)	r3, [r0]
#endif
#ifndef __ARMEB__
	orr	r2, r2, r3, lsl #8
#else
	orr	r2, r3, r2, lsl #8
#endif
	mov	r0, #0
	mov	pc, lr
ENDPROC(__get_user_2)

ENTRY(__get_user_4)
4:	T(ldr)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
ENDPROC(__get_user_4)

__get_user_bad:
	mov	r2, #0
	mov	r0, #-EFAULT
	mov	pc, lr
ENDPROC(__get_user_bad)

.pushsection __ex_table, "a"
	.long	1b, __get_user_bad
	.long	2b, __get_user_bad
	.long	3b, __get_user_bad
	.long	4b, __get_user_bad
.popsection
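The __ex_table section emitted at the end of the file is what turns a faulting load at labels 1:-4: into an -EFAULT return: each entry pairs a potentially faulting instruction with its fixup handler. Roughly (a sketch assuming the 32-bit ARM layout of two absolute addresses per entry):

/*
 * Sketch of the entries generated by the .long pairs above: on a
 * kernel-mode fault the handler looks the faulting PC up in this table
 * and, if found, resumes at the fixup, i.e. __get_user_bad, which sets
 * r0 = -EFAULT and r2 = 0 before returning to the caller.
 */
struct exception_table_entry {
	unsigned long insn;	/* address of the 1:/2:/3:/4: load */
	unsigned long fixup;	/* address to resume at: __get_user_bad */
};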