x86/cpu: Relocate sync_core() to sync_core.h
Having sync_core() in processor.h is problematic since it is not possible
to check for hardware capabilities via the *cpu_has() family of macros.
The latter needs the definitions in processor.h.

It also looks more intuitive to relocate the function to sync_core.h.

This changeset does not make changes in functionality.

Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Link: https://lore.kernel.org/r/20200727043132.15082-3-ricardo.neri-calderon@linux.intel.com
This commit is contained in:
parent 85b23fbc7d
commit 9998a9832c
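As background for the dependency problem the commit message describes, here is a minimal sketch of the kind of CPU-feature check that only becomes possible once sync_core() lives outside processor.h. The feature flag, the iret_to_self() helper and the function name are illustrative assumptions, not code from this commit:

#include <asm/cpufeature.h>     /* *cpu_has() needs the definitions in processor.h */

/* Hypothetical sketch: dispatch on a CPU feature inside sync_core().
 * X86_FEATURE_SERIALIZE and iret_to_self() are assumed names here. */
static inline void sync_core_sketch(void)
{
        if (boot_cpu_has(X86_FEATURE_SERIALIZE)) {
                /* a dedicated serializing instruction would go here */
                return;
        }
        iret_to_self();         /* assumed wrapper around the IRET-to-self sequence below */
}

While sync_core() was defined in processor.h, such a check would require cpufeature.h, which itself needs processor.h; moving the function to sync_core.h breaks that cycle.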
@@ -678,70 +678,6 @@ static inline unsigned int cpuid_edx(unsigned int op)
        return edx;
}

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 * a) Text was modified using one virtual address and is about to be executed
 *    from the same physical page at a different virtual address.
 *
 * b) Text was modified on a different CPU, may subsequently be
 *    executed on this CPU, and you want to make sure the new version
 *    gets executed. This generally means you're calling this in a IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
        /*
         * There are quite a few ways to do this. IRET-to-self is nice
         * because it works on every CPU, at any CPL (so it's compatible
         * with paravirtualization), and it never exits to a hypervisor.
         * The only down sides are that it's a bit slow (it seems to be
         * a bit more than 2x slower than the fastest options) and that
         * it unmasks NMIs. The "push %cs" is needed because, in
         * paravirtual environments, __KERNEL_CS may not be a valid CS
         * value when we do IRET directly.
         *
         * In case NMI unmasking or performance ever becomes a problem,
         * the next best option appears to be MOV-to-CR2 and an
         * unconditional jump. That sequence also works on all CPUs,
         * but it will fault at CPL3 (i.e. Xen PV).
         *
         * CPUID is the conventional way, but it's nasty: it doesn't
         * exist on some 486-like CPUs, and it usually exits to a
         * hypervisor.
         *
         * Like all of Linux's memory ordering operations, this is a
         * compiler barrier as well.
         */
#ifdef CONFIG_X86_32
        asm volatile (
                "pushfl\n\t"
                "pushl %%cs\n\t"
                "pushl $1f\n\t"
                "iret\n\t"
                "1:"
                : ASM_CALL_CONSTRAINT : : "memory");
#else
        unsigned int tmp;

        asm volatile (
                "mov %%ss, %0\n\t"
                "pushq %q0\n\t"
                "pushq %%rsp\n\t"
                "addq $8, (%%rsp)\n\t"
                "pushfq\n\t"
                "mov %%cs, %0\n\t"
                "pushq %q0\n\t"
                "pushq $1f\n\t"
                "iretq\n\t"
                "1:"
                : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);
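Case (b) in the comment above is the cross-CPU scenario: text is patched on one CPU, and every other CPU is then forced, via IPI, to resynchronize before it may execute the new text. A schematic sketch of that caller pattern, assuming a plain on_each_cpu() broadcast (do_sync_core_sketch and sync_all_cpus_after_patching are illustrative names, not taken from this commit):

#include <linux/smp.h>
#include <linux/sync_core.h>    /* the header these callers include for sync_core() after this change */

static void do_sync_core_sketch(void *unused)
{
        /* runs on each CPU in IPI context; forces the icache and
         * prefetched instruction stream to catch up with the new text */
        sync_core();
}

static void sync_all_cpus_after_patching(void)
{
        /* broadcast to every CPU, including the caller, and wait for completion */
        on_each_cpu(do_sync_core_sketch, NULL, 1);
}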
@@ -6,6 +6,70 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 * a) Text was modified using one virtual address and is about to be executed
 *    from the same physical page at a different virtual address.
 *
 * b) Text was modified on a different CPU, may subsequently be
 *    executed on this CPU, and you want to make sure the new version
 *    gets executed. This generally means you're calling this in a IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
        /*
         * There are quite a few ways to do this. IRET-to-self is nice
         * because it works on every CPU, at any CPL (so it's compatible
         * with paravirtualization), and it never exits to a hypervisor.
         * The only down sides are that it's a bit slow (it seems to be
         * a bit more than 2x slower than the fastest options) and that
         * it unmasks NMIs. The "push %cs" is needed because, in
         * paravirtual environments, __KERNEL_CS may not be a valid CS
         * value when we do IRET directly.
         *
         * In case NMI unmasking or performance ever becomes a problem,
         * the next best option appears to be MOV-to-CR2 and an
         * unconditional jump. That sequence also works on all CPUs,
         * but it will fault at CPL3 (i.e. Xen PV).
         *
         * CPUID is the conventional way, but it's nasty: it doesn't
         * exist on some 486-like CPUs, and it usually exits to a
         * hypervisor.
         *
         * Like all of Linux's memory ordering operations, this is a
         * compiler barrier as well.
         */
#ifdef CONFIG_X86_32
        asm volatile (
                "pushfl\n\t"
                "pushl %%cs\n\t"
                "pushl $1f\n\t"
                "iret\n\t"
                "1:"
                : ASM_CALL_CONSTRAINT : : "memory");
#else
        unsigned int tmp;

        asm volatile (
                "mov %%ss, %0\n\t"
                "pushq %q0\n\t"
                "pushq %%rsp\n\t"
                "addq $8, (%%rsp)\n\t"
                "pushfq\n\t"
                "mov %%cs, %0\n\t"
                "pushq %q0\n\t"
                "pushq $1f\n\t"
                "iretq\n\t"
                "1:"
                : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
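The comment cut off by the hunk's trailing context above was already present in sync_core.h; it appears to document sync_core_before_usermode(), the helper used when core serialization must happen before dropping back to user space. A rough, purely illustrative sketch of such a caller (the surrounding function is invented for illustration and is not part of this diff):

#include <linux/sync_core.h>

/* Hypothetical exit path: names and structure are illustrative only. */
static void prepare_exit_to_usermode_sketch(void)
{
        /* ... pending work, signal handling, etc. elided ... */

        /* make sure any cross-modified text is observed before the
         * return-to-user instruction, unless that instruction is
         * itself core-serializing */
        sync_core_before_usermode();
}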
@@ -15,6 +15,7 @@
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -42,6 +42,7 @@
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/set_memory.h>
#include <linux/sync_core.h>
#include <linux/task_work.h>
#include <linux/hardirq.h>
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
@@ -16,6 +16,7 @@
#define GRU_OPERATION_TIMEOUT   (((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c)            ((c) *1000000000 / local_cpu_data->itc_freq)
#else
#include <linux/sync_core.h>
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT   ((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c)            ((c) * 1000000 / tsc_khz)
@@ -16,6 +16,7 @@
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sync_core.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>