forked from luck/tmp_suning_uos_patched
KVM: arm/arm64: Share common code in user_mem_abort()
The code for operations such as marking the pfn as dirty, and dcache/icache maintenance during stage 2 fault handling, is duplicated between normal pages and PMD hugepages. Instead of creating another copy of the operations when we introduce PUD hugepages, let's share them across the different page sizes.

Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
This commit is contained in:
parent
60c3ab30d8
commit
3f58bf6345
|
@ -1475,7 +1475,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||||
unsigned long fault_status)
|
unsigned long fault_status)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
|
bool write_fault, exec_fault, writable, force_pte = false;
|
||||||
unsigned long mmu_seq;
|
unsigned long mmu_seq;
|
||||||
gfn_t gfn = fault_ipa >> PAGE_SHIFT;
|
gfn_t gfn = fault_ipa >> PAGE_SHIFT;
|
||||||
struct kvm *kvm = vcpu->kvm;
|
struct kvm *kvm = vcpu->kvm;
|
||||||
|
@ -1484,7 +1484,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||||
kvm_pfn_t pfn;
|
kvm_pfn_t pfn;
|
||||||
pgprot_t mem_type = PAGE_S2;
|
pgprot_t mem_type = PAGE_S2;
|
||||||
bool logging_active = memslot_is_logging(memslot);
|
bool logging_active = memslot_is_logging(memslot);
|
||||||
unsigned long flags = 0;
|
unsigned long vma_pagesize, flags = 0;
|
||||||
|
|
||||||
write_fault = kvm_is_write_fault(vcpu);
|
write_fault = kvm_is_write_fault(vcpu);
|
||||||
exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
|
exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
|
||||||
|
@ -1504,10 +1504,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
|
vma_pagesize = vma_kernel_pagesize(vma);
|
||||||
hugetlb = true;
|
if (vma_pagesize == PMD_SIZE && !logging_active) {
|
||||||
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
|
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
|
||||||
} else {
|
} else {
|
||||||
|
/*
|
||||||
|
* Fallback to PTE if it's not one of the Stage 2
|
||||||
|
* supported hugepage sizes
|
||||||
|
*/
|
||||||
|
vma_pagesize = PAGE_SIZE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Pages belonging to memslots that don't have the same
|
* Pages belonging to memslots that don't have the same
|
||||||
* alignment for userspace and IPA cannot be mapped using
|
* alignment for userspace and IPA cannot be mapped using
|
||||||
|
@ -1573,23 +1579,33 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||||
if (mmu_notifier_retry(kvm, mmu_seq))
|
if (mmu_notifier_retry(kvm, mmu_seq))
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
|
||||||
if (!hugetlb && !force_pte)
|
if (vma_pagesize == PAGE_SIZE && !force_pte) {
|
||||||
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
|
/*
|
||||||
|
* Only PMD_SIZE transparent hugepages(THP) are
|
||||||
|
* currently supported. This code will need to be
|
||||||
|
* updated to support other THP sizes.
|
||||||
|
*/
|
||||||
|
if (transparent_hugepage_adjust(&pfn, &fault_ipa))
|
||||||
|
vma_pagesize = PMD_SIZE;
|
||||||
|
}
|
||||||
|
|
||||||
if (hugetlb) {
|
if (writable)
|
||||||
|
kvm_set_pfn_dirty(pfn);
|
||||||
|
|
||||||
|
if (fault_status != FSC_PERM)
|
||||||
|
clean_dcache_guest_page(pfn, vma_pagesize);
|
||||||
|
|
||||||
|
if (exec_fault)
|
||||||
|
invalidate_icache_guest_page(pfn, vma_pagesize);
|
||||||
|
|
||||||
|
if (vma_pagesize == PMD_SIZE) {
|
||||||
pmd_t new_pmd = pfn_pmd(pfn, mem_type);
|
pmd_t new_pmd = pfn_pmd(pfn, mem_type);
|
||||||
new_pmd = pmd_mkhuge(new_pmd);
|
new_pmd = pmd_mkhuge(new_pmd);
|
||||||
if (writable) {
|
if (writable)
|
||||||
new_pmd = kvm_s2pmd_mkwrite(new_pmd);
|
new_pmd = kvm_s2pmd_mkwrite(new_pmd);
|
||||||
kvm_set_pfn_dirty(pfn);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (fault_status != FSC_PERM)
|
|
||||||
clean_dcache_guest_page(pfn, PMD_SIZE);
|
|
||||||
|
|
||||||
if (exec_fault) {
|
if (exec_fault) {
|
||||||
new_pmd = kvm_s2pmd_mkexec(new_pmd);
|
new_pmd = kvm_s2pmd_mkexec(new_pmd);
|
||||||
invalidate_icache_guest_page(pfn, PMD_SIZE);
|
|
||||||
} else if (fault_status == FSC_PERM) {
|
} else if (fault_status == FSC_PERM) {
|
||||||
/* Preserve execute if XN was already cleared */
|
/* Preserve execute if XN was already cleared */
|
||||||
if (stage2_is_exec(kvm, fault_ipa))
|
if (stage2_is_exec(kvm, fault_ipa))
|
||||||
|
@ -1602,16 +1618,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||||
|
|
||||||
if (writable) {
|
if (writable) {
|
||||||
new_pte = kvm_s2pte_mkwrite(new_pte);
|
new_pte = kvm_s2pte_mkwrite(new_pte);
|
||||||
kvm_set_pfn_dirty(pfn);
|
|
||||||
mark_page_dirty(kvm, gfn);
|
mark_page_dirty(kvm, gfn);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fault_status != FSC_PERM)
|
|
||||||
clean_dcache_guest_page(pfn, PAGE_SIZE);
|
|
||||||
|
|
||||||
if (exec_fault) {
|
if (exec_fault) {
|
||||||
new_pte = kvm_s2pte_mkexec(new_pte);
|
new_pte = kvm_s2pte_mkexec(new_pte);
|
||||||
invalidate_icache_guest_page(pfn, PAGE_SIZE);
|
|
||||||
} else if (fault_status == FSC_PERM) {
|
} else if (fault_status == FSC_PERM) {
|
||||||
/* Preserve execute if XN was already cleared */
|
/* Preserve execute if XN was already cleared */
|
||||||
if (stage2_is_exec(kvm, fault_ipa))
|
if (stage2_is_exec(kvm, fault_ipa))
|
||||||
|
|
Loading…
Reference in New Issue
Block a user