X86/nVMX: Update the PML table without mapping and unmapping the page
Update the PML table without mapping and unmapping the page. This also avoids using kvm_vcpu_gpa_to_page(..), which assumes that there is a "struct page" for guest memory. As a side effect of using kvm_write_guest_page, the page is also properly marked as dirty.

Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2e408936b6
commit 3d5f6beb74
@@ -7116,9 +7116,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 {
 	struct vmcs12 *vmcs12;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	gpa_t gpa;
-	struct page *page = NULL;
-	u64 *pml_address;
+	gpa_t gpa, dst;
 
 	if (is_guest_mode(vcpu)) {
 		WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7138,15 +7136,13 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 		}
 
 		gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+		dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
 
-		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
-		if (is_error_page(page))
+		if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+					 offset_in_page(dst), sizeof(gpa)))
 			return 0;
 
-		pml_address = kmap(page);
-		pml_address[vmcs12->guest_pml_index--] = gpa;
-		kunmap(page);
-		kvm_release_page_clean(page);
+		vmcs12->guest_pml_index--;
 	}
 
 	return 0;
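For readability, here is a sketch of how the nested-guest path of vmx_write_pml_buffer() reads with the hunks above applied. The body between the two hunks is only summarized; the get_vmcs12() assignment and the wording of the elided checks are inferred from context rather than copied verbatim from the patched file:

static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t gpa, dst;

	if (is_guest_mode(vcpu)) {
		WARN_ON_ONCE(vmx->nested.pml_full);

		/*
		 * Elided (between the two hunks above): vmcs12 = get_vmcs12(vcpu)
		 * plus the checks that nested PML is enabled and that
		 * guest_pml_index is within the PML buffer.
		 */

		/* Page-aligned GPA logged by hardware for this exit. */
		gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
		/* Guest-physical address of the current slot in L1's PML buffer. */
		dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

		/*
		 * Write the entry through the guest-memory API; unlike the
		 * old kmap() path this does not require a struct page and
		 * also marks the backing page dirty.
		 */
		if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
					 offset_in_page(dst), sizeof(gpa)))
			return 0;

		vmcs12->guest_pml_index--;
	}

	return 0;
}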