Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Ingo writes:
 "x86 fixes: It's 4 misc fixes, 3 build warning fixes and 3 comment
  fixes. In hindsight I'd have left out the 3 comment fixes to make
  the pull request look less scary at such a late point in the
  cycle. :-/"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/swiotlb: Enable swiotlb for > 4GiG RAM on 32-bit kernels
  x86/fpu: Fix i486 + no387 boot crash by only saving FPU registers on context switch if there is an FPU
  x86/fpu: Remove second definition of fpu in __fpu__restore_sig()
  x86/entry/64: Further improve paranoid_entry comments
  x86/entry/32: Clear the CS high bits
  x86/boot: Add -Wno-pointer-sign to KBUILD_CFLAGS
  x86/time: Correct the attribute on jiffies' definition
  x86/entry: Add some paranoid entry/exit CR3 handling comments
  x86/percpu: Fix this_cpu_read()
  x86/tsc: Force inlining of cyc2ns bits
commit b0d04fb56b
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += -Wno-pointer-sign
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
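The new flag silences GCC's -Wpointer-sign warnings in the compressed boot code, where mixing signed and unsigned char pointers is common and harmless. A minimal sketch of the kind of mismatch that trips the warning (hypothetical example, not taken from the boot code itself):

    #include <string.h>

    size_t length_of(unsigned char *buf)
    {
            /* strlen() takes 'const char *'; passing 'unsigned char *'
             * triggers -Wpointer-sign. The Makefile change above turns
             * that warning off for this directory. */
            return strlen(buf);
    }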
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -389,6 +389,13 @@
 	 * that register for the time this macro runs
 	 */
 
+	/*
+	 * The high bits of the CS dword (__csh) are used for
+	 * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
+	 * hardware didn't do this for us.
+	 */
+	andl	$(0x0000ffff), PT_CS(%esp)
+
 	/* Are we on the entry stack? Bail out if not! */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
 	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
 	/* Load top of task-stack into %edi */
 	movl	TSS_entry2task_stack(%edi), %edi
 
-	/*
-	 * Clear unused upper bits of the dword containing the word-sized CS
-	 * slot in pt_regs in case hardware didn't clear it for us.
-	 */
-	andl	$(0x0000ffff), PT_CS(%esp)
-
 	/* Special case - entry from kernel mode via entry stack */
 #ifdef CONFIG_VM86
 	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
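Together these two hunks move the masking of the CS slot earlier in the macro: the mask previously ran after code that had already consumed the flag bits, so stale high bits could leak through. In C terms the operation amounts to the following (the flag bit positions are stated as assumptions for illustration; the real values live in the x86 headers):

    /* Illustrative C rendering of the 'andl' above. */
    #define CS_FROM_ENTRY_STACK	(1U << 31)	/* assumed bit position */
    #define CS_FROM_USER_CR3	(1U << 30)	/* assumed bit position */

    static inline void clear_cs_high_bits(unsigned int *cs_slot)
    {
            /* CS itself is a 16-bit selector; the upper bits of its
             * dword-sized pt_regs slot are reused as software flags
             * and must not carry stale values into the entry path. */
            *cs_slot &= 0x0000ffff;
    }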
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1187,6 +1187,16 @@ ENTRY(paranoid_entry)
 	xorl	%ebx, %ebx
 
 1:
+	/*
+	 * Always stash CR3 in %r14. This value will be restored,
+	 * verbatim, at exit. Needed if paranoid_entry interrupted
+	 * another entry that already switched to the user CR3 value
+	 * but has not yet returned to userspace.
+	 *
+	 * This is also why CS (stashed in the "iret frame" by the
+	 * hardware at entry) can not be used: this may be a return
+	 * to kernel code, but with a user CR3 value.
+	 */
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
 	ret
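The new comment documents the stash-and-restore discipline: %r14 holds whatever CR3 value was live at entry, and that exact value is written back at exit, because a paranoid entry may interrupt another entry path that already switched to the user CR3 but has not yet returned to userspace. A conceptual C sketch of the pattern (the helper names and the flag bit are stand-ins, not the kernel's actual API):

    /* Conceptual sketch only; the real logic is the asm macro above. */
    #define USER_PGTABLE_BIT	0x1000UL	/* assumed PTI flag bit in CR3 */

    extern unsigned long read_cr3(void);	/* stand-in accessors */
    extern void write_cr3(unsigned long cr3);

    static unsigned long paranoid_save_cr3(void)
    {
            unsigned long saved = read_cr3();	/* stash verbatim (%r14) */

            /* If we interrupted code running on the user page tables,
             * switch to the kernel CR3 so kernel data is mapped. */
            if (saved & USER_PGTABLE_BIT)
                    write_cr3(saved & ~USER_PGTABLE_BIT);

            return saved;	/* RESTORE_CR3 writes this back unchanged */
    }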
@@ -1211,11 +1221,13 @@ ENTRY(paranoid_exit)
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	.Lparanoid_exit_no_swapgs
 	TRACE_IRQS_IRETQ
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
 	jmp	.Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
 	TRACE_IRQS_IRETQ_DEBUG
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
 	jmp restore_regs_and_return_to_kernel
@@ -1626,6 +1638,7 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	do_nmi
 
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
 	testl	%ebx, %ebx			/* swapgs needed? */
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->initialized) {
+	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
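This one-line guard is the i486 + no387 fix from the pull request: copy_fpregs_to_fpstate() executes an FPU save instruction, and on a machine booted without an FPU that instruction faults on the very first context switch. static_cpu_has(X86_FEATURE_FPU) compiles down to a run-time-patched branch, so the check costs essentially nothing on FPU-equipped systems. A stripped-down user-space sketch of the shape of the fix (have_fpu() and save_fpu_regs() are hypothetical stand-ins):

    #include <stdbool.h>

    struct fpu { bool initialized; int last_cpu; };

    extern bool have_fpu(void);                 /* stand-in for static_cpu_has() */
    extern bool save_fpu_regs(struct fpu *fpu); /* stand-in for copy_fpregs_to_fpstate() */

    static void switch_fpu_prepare_sketch(struct fpu *old_fpu, int cpu)
    {
            /* Only touch FPU state if the hardware actually has an FPU;
             * executing the save instruction without one would fault,
             * which is exactly the boot crash this change fixes. */
            if (have_fpu() && old_fpu->initialized) {
                    if (!save_fpu_regs(old_fpu))
                            old_fpu->last_cpu = -1;
                    else
                            old_fpu->last_cpu = cpu;
            }
    }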
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -185,22 +185,22 @@ do {						\
 	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(1)",%0"		\
+		asm volatile(op "b "__percpu_arg(1)",%0"\
 		    : "=q" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(1)",%0"		\
+		asm volatile(op "w "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(1)",%0"		\
+		asm volatile(op "l "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(1)",%0"		\
+		asm volatile(op "q "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
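Adding volatile is the actual this_cpu_read() fix: without it, GCC may treat the asm as a pure function of its visible operands and merge or hoist identical reads, so code that polls a per-CPU variable can be collapsed into a single load taken before the loop. A user-space sketch of the same hazard (plain x86 inline asm standing in for the percpu accessor):

    /* Sketch of why 'volatile' matters here. With a non-volatile asm the
     * compiler may assume the result depends only on the operands it can
     * see and hoist the read out of the loop entirely. */
    static int read_once(int *p)
    {
            int ret;

            asm volatile("movl %1, %0" : "=r" (ret) : "m" (*p));
            return ret;
    }

    void wait_for_flag(int *flag)
    {
            /* each iteration re-reads memory thanks to 'volatile';
             * without it this can be optimized into an infinite loop */
            while (!read_once(flag))
                    ;
    }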
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		 * thread's fpu state, reconstruct fxstate from the fsave
 		 * header. Validate and sanitize the copied state.
 		 */
-		struct fpu *fpu = &tsk->thread.fpu;
 		struct user_i387_ia32_struct env;
 		int err = 0;
 
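The deleted line was a second, inner definition of fpu that shadowed one already in scope earlier in __fpu__restore_sig(), the kind of duplicate GCC flags under -Wshadow. A tiny illustration of the pattern being removed (hypothetical types):

    struct fpu { int state; };
    struct task { struct fpu fpu; };

    void shadow_example(struct task *tsk)
    {
            struct fpu *fpu = &tsk->fpu;	/* outer definition */

            {
                    /* a second 'struct fpu *fpu = &tsk->fpu;' here would
                     * shadow the outer variable: harmless but confusing,
                     * and redundant -- hence the deletion above */
                    fpu->state = 0;
            }
    }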
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 int __init pci_swiotlb_detect_4gb(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
 	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
 		swiotlb = 1;
-#endif
 
 	/*
 	 * If SME is active then swiotlb will be set to 1 so that bounce
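Removing the #ifdef enables the check on 32-bit kernels too: a PAE kernel can manage RAM above 4GB, and devices limited to 32-bit DMA then need swiotlb bounce buffers just as they do on 64-bit. The comparison works in page frame numbers; a small self-contained sketch of the arithmetic (the MAX_DMA32_PFN value is stated as an assumption matching its usual x86 definition):

    #include <stdio.h>

    #define PAGE_SHIFT	12
    /* first PFN past the 32-bit (4 GB) DMA boundary -- assumed definition */
    #define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))

    int main(void)
    {
            unsigned long max_possible_pfn = 0x140000UL;	/* ~5 GB of RAM */

            /* any page frame above 4 GB => enable swiotlb bounce buffering */
            printf("swiotlb = %d\n", max_possible_pfn > MAX_DMA32_PFN);
            return 0;
    }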
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -25,7 +25,7 @@
 #include <asm/time.h>
 
 #ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 #endif
 
 unsigned long profile_pc(struct pt_regs *regs)
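The generic declaration of jiffies uses __cacheline_aligned_in_smp, so the x86-64 definition has to use the same attribute: on non-SMP configurations the two attributes expand differently (one still forces a section and alignment, the other expands to nothing), and the mismatch between declaration and definition can trigger a compiler warning. A simplified sketch of the distinction (assumed, abbreviated expansions):

    /* Simplified, assumed expansions of the two kernel attributes: */
    #ifdef CONFIG_SMP
    # define __cacheline_aligned_in_smp \
            __attribute__((aligned(64), section(".data..cacheline_aligned")))
    #else
    # define __cacheline_aligned_in_smp	/* empty on UP builds */
    #endif

    /* The header declares jiffies with _in_smp, so the definition must
     * match, or the two can disagree on section placement/alignment: */
    extern volatile unsigned long __cacheline_aligned_in_smp jiffies;
    volatile unsigned long __cacheline_aligned_in_smp jiffies = 0;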
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -58,7 +58,7 @@ struct cyc2ns {
 
 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
 
-void cyc2ns_read_begin(struct cyc2ns_data *data)
+void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
 {
 	int seq, idx;
 
@@ -75,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
 	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
 }
 
-void cyc2ns_read_end(void)
+void __always_inline cyc2ns_read_end(void)
 {
 	preempt_enable_notrace();
 }
@@ -104,7 +104,7 @@ void cyc2ns_read_end(void)
  * -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	struct cyc2ns_data data;
 	unsigned long long ns;
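Plain inline is only a hint, and these helpers sit on the sched_clock() path, where an out-of-line copy adds call overhead and defeats the intent that the cyc2ns read path stay self-contained; __always_inline takes the decision away from the optimizer, which is the "force inlining of cyc2ns bits" in the pull request. A minimal sketch of the attribute (assumed definition matching the usual kernel one; the mul/shift is a simplified stand-in for what cycles_2_ns computes):

    /* The kernel's __always_inline is essentially this attribute: */
    #define __always_inline	inline __attribute__((__always_inline__))

    static __always_inline unsigned long long mul_shift(unsigned long long cyc,
                                                        unsigned int mul,
                                                        int shift)
    {
            /* must be inlined even at low optimization levels, unlike a
             * plain 'inline', which the compiler is free to ignore */
            return (cyc * mul) >> shift;
    }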