forked from luck/tmp_suning_uos_patched
x86/paravirt: Change vcpu_is_preempted() arg type to long
The cpu argument in the function prototype of vcpu_is_preempted() is changed from int to long. That makes it easier to provide a better optimized assembly version of that function. For Xen, vcpu_is_preempted(long) calls xen_vcpu_stolen(int), the downcast from long to int is not a problem as vCPU number won't exceed 32 bits. Signed-off-by: Waiman Long <longman@redhat.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
96794e4ed4
commit
6c62985d57
|
@@ -673,7 +673,7 @@ static __always_inline void pv_kick(int cpu)
 	PVOP_VCALL1(pv_lock_ops.kick, cpu);
 }
 
-static __always_inline bool pv_vcpu_is_preempted(int cpu)
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
 {
 	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
 }
|
|
@@ -34,7 +34,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 
 #define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
+static inline bool vcpu_is_preempted(long cpu)
 {
 	return pv_vcpu_is_preempted(cpu);
 }
|
|
@@ -589,7 +589,7 @@ static void kvm_wait(u8 *ptr, u8 val)
 	local_irq_restore(flags);
 }
 
-__visible bool __kvm_vcpu_is_preempted(int cpu)
+__visible bool __kvm_vcpu_is_preempted(long cpu)
 {
 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
 
|
|
@@ -20,7 +20,7 @@ bool pv_is_native_spin_unlock(void)
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
-__visible bool __native_vcpu_is_preempted(int cpu)
+__visible bool __native_vcpu_is_preempted(long cpu)
 {
 	return false;
 }
|
Loading…
Reference in New Issue
Block a user