KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool
Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/kvm_arch_setup_async_pf() return '1' when a job to handle the page fault asynchronously was scheduled and '0' otherwise. To avoid the confusion, change the return type to 'bool'. No functional change intended.

Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200615121334.91300-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit e8c22266e6
parent 9ce372b33a
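For context, a caller treats the boolean result as "was an async job queued?": true means the fault will be completed later through the async page fault machinery, false means it has to be resolved synchronously. Below is a minimal sketch of such a caller in C; it is illustrative only and not part of this patch — handle_fault(), handle_fault_sync() and the RET_PF_RETRY usage are hypothetical stand-ins.

/* Illustrative sketch, not part of the patch: consuming the bool
 * returned by the reworked kvm_arch_setup_async_pf(). The helper
 * handle_fault_sync() is hypothetical.
 */
static int handle_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn)
{
	/* true: a job was queued, the fault completes asynchronously */
	if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
		return RET_PF_RETRY;

	/* false: no job was queued, handle the fault synchronously */
	return handle_fault_sync(vcpu, cr2_or_gpa);
}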
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3954,33 +3954,31 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 {
 	hva_t hva;
 	struct kvm_arch_async_pf arch;
-	int rc;
 
 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
-		return 0;
+		return false;
 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
 	    vcpu->arch.pfault_compare)
-		return 0;
+		return false;
 	if (psw_extint_disabled(vcpu))
-		return 0;
+		return false;
 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
-		return 0;
+		return false;
 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
-		return 0;
+		return false;
 	if (!vcpu->arch.gmap->pfault_enabled)
-		return 0;
+		return false;
 
 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
 	hva += current->thread.gmap_addr & ~PAGE_MASK;
 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
-		return 0;
+		return false;
 
-	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
-	return rc;
+	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4045,8 +4045,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	walk_shadow_page_lockless_end(vcpu);
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-				   gfn_t gfn)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+				    gfn_t gfn)
 {
 	struct kvm_arch_async_pf arch;
 
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -211,8 +211,8 @@ struct kvm_async_pf {
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -156,17 +156,21 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch)
+/*
+ * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
+ * success, 'false' on failure (page fault has to be handled synchronously).
+ */
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
 
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
-		return 0;
+		return false;
 
 	/* Arch specific code should not do async PF in this case */
 	if (unlikely(kvm_is_error_hva(hva)))
-		return 0;
+		return false;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -174,7 +178,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	 */
 	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
-		return 0;
+		return false;
 
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
@@ -193,7 +197,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	schedule_work(&work->work);
 
-	return 1;
+	return true;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)