KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
An innocent reader of the following x86 KVM code:

 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 ...

may get very confused: if the APF mechanism is not enabled, why do we
report that we 'can inject async page present'? In reality, upon
injection kvm_arch_async_page_present() will check the same condition
again and, in case APF is disabled, will just drop the item. This is
fine, as a guest which deliberately disabled APF doesn't expect to get
any APF notifications.

Rename kvm_arch_can_inject_async_page_present() to
kvm_arch_can_dequeue_async_page_present() to make it clear what we are
checking: whether the item can be dequeued (meaning either injected or
just dropped).

On s390, kvm_arch_can_inject_async_page_present() always returns 'true',
so the rename doesn't matter much there.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200525144125.143875-4-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 68fd66f100
commit 7c0ade6c90
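For context, here is a minimal illustrative sketch, not the actual
arch/x86/kvm implementation, of how the renamed check and
kvm_arch_async_page_present() relate as described in the message above:
the dequeue check returns true when APF is disabled, and the
presentation path repeats the same test and silently drops the item.
The helpers kvm_can_deliver_async_pf_event() and
inject_page_present_notification() are hypothetical placeholders.

/*
 * Sketch only -- not the kernel code.  Field names follow the x86
 * snippet quoted in the commit message; the helpers marked
 * 'hypothetical' do not exist in the kernel tree.
 */
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * Guest disabled APF: the completed item may still be dequeued;
	 * it will simply be dropped instead of injected.
	 */
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;

	/* Otherwise injection depends on further vCPU state. */
	return kvm_can_deliver_async_pf_event(vcpu);	/* hypothetical */
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	/*
	 * The same condition is checked again here; with APF disabled
	 * the item is silently dropped, which is what a guest that
	 * turned APF off expects.
	 */
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return;

	inject_page_present_notification(vcpu, work);	/* hypothetical */
}

Returning true with APF disabled keeps the dequeue loop in
kvm_check_async_pf_completion() (see the last hunk below) draining the
'done' list, so completed items are freed rather than left queued.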
|
@@ -973,7 +973,7 @@ struct kvm_arch_async_pf {
 	unsigned long pfault_token;
 };
 
-bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
 
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 			       struct kvm_async_pf *work);

@@ -3943,7 +3943,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 	/* s390 will always inject the page directly */
 }
 
-bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * s390 will always inject the page directly,

@@ -1660,7 +1660,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 			       struct kvm_async_pf *work);
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 			       struct kvm_async_pf *work);
-bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);

@@ -10521,7 +10521,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
-bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;

@@ -134,7 +134,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	struct kvm_async_pf *work;
 
 	while (!list_empty_careful(&vcpu->async_pf.done) &&
-	       kvm_arch_can_inject_async_page_present(vcpu)) {
+	       kvm_arch_can_dequeue_async_page_present(vcpu)) {
 		spin_lock(&vcpu->async_pf.lock);
 		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
 					link);