commit 1c6007d59a
Merge tag 'kvm-arm-for-3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

KVM/ARM changes for v3.20 including GICv3 emulation, dirty page logging, added
trace symbols, and adding an explicit VGIC init device control IOCTL.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJUwhsKAAoJEEtpOizt6ddyuSEH/ia2uf07N0i+C1dPKYiqhKEd
nFqBvgrhAMVztWLmy1Wq4SnO9YNd+CrPYATrfCiYsYQ9aKc09+qDq+uo06bVpZXz
KsHjVGUsdyJ4qRqjDixkPvZviGIXa6C//+hcwg1XH2nit1uHmXVupzB9dDz3ZM2l
GCwApdRdaaUVDt5Ud2ljqIWZa18Qf/5/HD8MdPXpmotDOKucL6pBr/1R1XWueCU/
ejRs/qy3EFyMWdEdfGFAMCa0ZvHbPmsJmvB/EgkyUnuJj77ptA0jNo1jtzSfEyis
53x4ffWnIsPl9yqhk0oKerIALVUvV4A7/me2ya6tsQ5fiBX7lJ3+qwggvCkWQzw=
=fMS2
-----END PGP SIGNATURE-----

Conflicts:
	arch/arm64/include/asm/kvm_arm.h
	arch/arm64/kvm/handle_exit.c

@@ -612,11 +612,14 @@ Type: vm ioctl
 Parameters: none
 Returns: 0 on success, -1 on error
 
-Creates an interrupt controller model in the kernel. On x86, creates a virtual
-ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
-local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC. On ARM/arm64, a GIC is
-created. On s390, a dummy irq routing table is created.
+Creates an interrupt controller model in the kernel.
+On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up
+future vcpus to have a local APIC. IRQ routing for GSIs 0-15 is set to both
+PIC and IOAPIC; GSI 16-23 only go to the IOAPIC.
+On ARM/arm64, a GICv2 is created. Any other GIC versions require the usage of
+KVM_CREATE_DEVICE, which also supports creating a GICv2. Using
+KVM_CREATE_DEVICE is preferred over KVM_CREATE_IRQCHIP for GICv2.
+On s390, a dummy irq routing table is created.
 
 Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled
 before KVM_CREATE_IRQCHIP can be used.

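[As an illustration of the ioctl documented above, and not part of this patch, a minimal userspace sketch follows. It assumes a VM file descriptor obtained from KVM_CREATE_VM; error handling is trimmed.]

/*
 * Editorial sketch: creating the in-kernel interrupt controller.  On
 * ARM/arm64 the preferred route is KVM_CREATE_DEVICE with
 * KVM_DEV_TYPE_ARM_VGIC_V2 or _V3; KVM_CREATE_IRQCHIP remains as the
 * legacy, GICv2-only path.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_irqchip(int vm_fd)
{
	struct kvm_create_device dev = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
		.flags = 0,
	};

	/* Preferred: explicit device creation; the kernel fills dev.fd. */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &dev) == 0)
		return dev.fd;

	/* Fallback: the legacy vm ioctl, GICv2 only on ARM. */
	return ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
}
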
@@ -3,22 +3,42 @@ ARM Virtual Generic Interrupt Controller (VGIC)
 
 Device types supported:
   KVM_DEV_TYPE_ARM_VGIC_V2     ARM Generic Interrupt Controller v2.0
+  KVM_DEV_TYPE_ARM_VGIC_V3     ARM Generic Interrupt Controller v3.0
 
 Only one VGIC instance may be instantiated through either this API or the
 legacy KVM_CREATE_IRQCHIP api. The created VGIC will act as the VM interrupt
 controller, requiring emulated user-space devices to inject interrupts to the
 VGIC instead of directly to CPUs.
 
+Creating a guest GICv3 device requires a host GICv3 as well.
+GICv3 implementations with hardware compatibility support allow a guest GICv2
+as well.
+
 Groups:
   KVM_DEV_ARM_VGIC_GRP_ADDR
    Attributes:
     KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit)
       Base address in the guest physical address space of the GIC distributor
-      register mappings.
+      register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
+      This address needs to be 4K aligned and the region covers 4 KByte.
 
     KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit)
       Base address in the guest physical address space of the GIC virtual cpu
-      interface register mappings.
+      interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
+      This address needs to be 4K aligned and the region covers 4 KByte.
+
+    KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
+      Base address in the guest physical address space of the GICv3 distributor
+      register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+      This address needs to be 64K aligned and the region covers 64 KByte.
+
+    KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit)
+      Base address in the guest physical address space of the GICv3
+      redistributor register mappings. There are two 64K pages for each
+      VCPU and all of the redistributor pages are contiguous.
+      Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+      This address needs to be 64K aligned.
 
+
 KVM_DEV_ARM_VGIC_GRP_DIST_REGS
   Attributes:

@@ -36,6 +56,7 @@ Groups:
     the register.
   Limitations:
     - Priorities are not implemented, and registers are RAZ/WI
+    - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
   Errors:
     -ENODEV: Getting or setting this register is not yet supported
     -EBUSY: One or more VCPUs are running

@@ -68,6 +89,7 @@ Groups:
 
   Limitations:
     - Priorities are not implemented, and registers are RAZ/WI
+    - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
   Errors:
     -ENODEV: Getting or setting this register is not yet supported
     -EBUSY: One or more VCPUs are running

@@ -81,3 +103,14 @@ Groups:
     -EINVAL: Value set is out of the expected range
     -EBUSY: Value has already be set, or GIC has already been initialized
             with default values.
+
+  KVM_DEV_ARM_VGIC_GRP_CTRL
+   Attributes:
+    KVM_DEV_ARM_VGIC_CTRL_INIT
+      request the initialization of the VGIC, no additional parameter in
+      kvm_device_attr.addr.
+  Errors:
+    -ENXIO: VGIC not properly configured as required prior to calling
+     this attribute
+    -ENODEV: no online VCPU
+    -ENOMEM: memory shortage when allocating vgic internal data

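[Editorial sketch, not part of this patch: how a VMM might drive the device control API documented above to give a guest a GICv3, including the new explicit init attribute. The base addresses are illustrative placeholders and error handling is trimmed.]

#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_guest_gicv3(int vm_fd)
{
	struct kvm_create_device gic = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
	__u64 dist_addr = 0x08000000;		/* 64K aligned, 64K region */
	__u64 redist_addr = 0x080a0000;		/* 2 * 64K per VCPU, contiguous */
	struct kvm_device_attr attr;

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &gic))
		return -1;

	/* Place the distributor in guest physical address space. */
	attr = (struct kvm_device_attr) {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr = KVM_VGIC_V3_ADDR_TYPE_DIST,
		.addr = (__u64)(unsigned long)&dist_addr,
	};
	if (ioctl(gic.fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/* Place the per-VCPU redistributor pages. */
	attr.attr = KVM_VGIC_V3_ADDR_TYPE_REDIST;
	attr.addr = (__u64)(unsigned long)&redist_addr;
	if (ioctl(gic.fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/* Once all VCPUs exist: explicit init via the new CTRL group. */
	attr = (struct kvm_device_attr) {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr = KVM_DEV_ARM_VGIC_CTRL_INIT,
	};
	return ioctl(gic.fd, KVM_SET_DEVICE_ATTR, &attr);
}
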
@ -96,6 +96,7 @@ extern char __kvm_hyp_code_end[];
|
|||
|
||||
extern void __kvm_flush_vm_context(void);
|
||||
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
|
||||
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
|
||||
|
||||
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
||||
#endif
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmio.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/cputype.h>
|
||||
|
||||
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
|
||||
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
|
||||
|
@ -167,9 +168,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
|
|||
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
|
||||
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.cp15[c0_MPIDR];
|
||||
return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
|
||||
}
|
||||
|
||||
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -68,6 +68,7 @@ struct kvm_arch {
|
|||
|
||||
/* Interrupt controller */
|
||||
struct vgic_dist vgic;
|
||||
int max_vcpus;
|
||||
};
|
||||
|
||||
#define KVM_NR_MEM_OBJS 40
|
||||
|
@ -234,6 +235,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
|
|||
int kvm_perf_init(void);
|
||||
int kvm_perf_teardown(void);
|
||||
|
||||
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
||||
static inline void kvm_arch_hardware_disable(void) {}
|
||||
static inline void kvm_arch_hardware_unsetup(void) {}
|
||||
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
||||
|
|
|
@ -37,6 +37,7 @@ struct kvm_exit_mmio {
|
|||
u8 data[8];
|
||||
u32 len;
|
||||
bool is_write;
|
||||
void *private;
|
||||
};
|
||||
|
||||
static inline void kvm_prepare_mmio(struct kvm_run *run,
|
||||
|
|
|
@ -114,6 +114,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
|
|||
pmd_val(*pmd) |= L_PMD_S2_RDWR;
|
||||
}
|
||||
|
||||
static inline void kvm_set_s2pte_readonly(pte_t *pte)
|
||||
{
|
||||
pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pte_readonly(pte_t *pte)
|
||||
{
|
||||
return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
|
||||
{
|
||||
pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
|
||||
{
|
||||
return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
|
||||
}
|
||||
|
||||
|
||||
/* Open coded p*d_addr_end that can deal with 64bit addresses */
|
||||
#define kvm_pgd_addr_end(addr, end) \
|
||||
({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
|
||||
|
|
|
@ -130,6 +130,7 @@
|
|||
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
|
||||
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
|
||||
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
/*
|
||||
|
|
|
@ -175,6 +175,8 @@ struct kvm_arch_memory_slot {
|
|||
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
|
||||
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
|
||||
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
|
||||
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
|
||||
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
|
||||
|
||||
/* KVM_IRQ_LINE irq field index values */
|
||||
#define KVM_ARM_IRQ_TYPE_SHIFT 24
|
||||
|
|
|
@ -21,8 +21,10 @@ config KVM
|
|||
select PREEMPT_NOTIFIERS
|
||||
select ANON_INODES
|
||||
select HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
|
||||
select KVM_MMIO
|
||||
select KVM_ARM_HOST
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
depends on ARM_VIRT_EXT && ARM_LPAE
|
||||
---help---
|
||||
Support hosting virtualized guest machines. You will also
|
||||
|
|
|
@ -22,4 +22,5 @@ obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
|
|||
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
|
||||
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
|
||||
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
|
||||
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
|
||||
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
|
||||
|
|
|
@ -132,6 +132,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||
/* Mark the initial VMID generation invalid */
|
||||
kvm->arch.vmid_gen = 0;
|
||||
|
||||
/* The maximum number of VCPUs is limited by the host's GIC model */
|
||||
kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
|
||||
|
||||
return ret;
|
||||
out_free_stage2_pgd:
|
||||
kvm_free_stage2_pgd(kvm);
|
||||
|
@ -218,6 +221,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (id >= kvm->arch.max_vcpus) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
|
||||
if (!vcpu) {
|
||||
err = -ENOMEM;
|
||||
|
@@ -787,9 +795,39 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 }
 
+/**
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
+ *
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
+ * always flush the TLB (step 4) even if previous step failed and the dirty
+ * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
+ * does not preclude user space subsequent dirty log read. Flushing TLB ensures
+ * writes will be marked dirty for next log read.
+ *
+ *   1. Take a snapshot of the bit and clear it if needed.
+ *   2. Write protect the corresponding page.
+ *   3. Copy the snapshot to the userspace.
+ *   4. Flush TLB's if needed.
+ */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	return -EINVAL;
+	bool is_dirty = false;
+	int r;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+
+	if (is_dirty)
+		kvm_flush_remote_tlbs(kvm);
+
+	mutex_unlock(&kvm->slots_lock);
+	return r;
 }
 
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,

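[Editorial sketch, not part of this patch: how userspace typically consumes the dirty log that the hunk above implements. The memslot is assumed to have been registered with KVM_MEM_LOG_DIRTY_PAGES; "slot" and "npages" come from the caller's own memslot bookkeeping.]

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_dirty_bitmap(int vm_fd, unsigned int slot, unsigned long npages,
		      unsigned long **bitmap_out)
{
	/* One bit per page, rounded up to whole 64-bit words. */
	size_t bytes = ((npages + 63) / 64) * 8;
	unsigned long *bitmap = calloc(1, bytes);
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};
	int ret;

	if (!bitmap)
		return -1;

	/* Snapshot, clear and write-protect all happen in the kernel. */
	ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	if (ret < 0) {
		free(bitmap);
		return ret;
	}

	*bitmap_out = bitmap;	/* set bits = pages written since the last call */
	return 0;
}
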
@ -821,7 +859,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
|
|||
switch (ioctl) {
|
||||
case KVM_CREATE_IRQCHIP: {
|
||||
if (vgic_present)
|
||||
return kvm_vgic_create(kvm);
|
||||
return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
|
||||
else
|
||||
return -ENXIO;
|
||||
}
|
||||
|
@ -1045,6 +1083,19 @@ static void check_kvm_target_cpu(void *ret)
|
|||
*(int *)ret = kvm_target_cpu();
|
||||
}
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
int i;
|
||||
|
||||
mpidr &= MPIDR_HWID_BITMASK;
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
|
||||
return vcpu;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize Hyp-mode and memory mappings on all CPUs.
|
||||
*/
|
||||
|
|
|
@ -87,11 +87,13 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||
*/
|
||||
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
trace_kvm_wfi(*vcpu_pc(vcpu));
|
||||
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
|
||||
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
|
||||
trace_kvm_wfx(*vcpu_pc(vcpu), true);
|
||||
kvm_vcpu_on_spin(vcpu);
|
||||
else
|
||||
} else {
|
||||
trace_kvm_wfx(*vcpu_pc(vcpu), false);
|
||||
kvm_vcpu_block(vcpu);
|
||||
}
|
||||
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
|
||||
|
|
|
@ -66,6 +66,17 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
|
|||
bx lr
|
||||
ENDPROC(__kvm_tlb_flush_vmid_ipa)
|
||||
|
||||
/**
|
||||
* void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
|
||||
*
|
||||
* Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
|
||||
* parameter
|
||||
*/
|
||||
|
||||
ENTRY(__kvm_tlb_flush_vmid)
|
||||
b __kvm_tlb_flush_vmid_ipa
|
||||
ENDPROC(__kvm_tlb_flush_vmid)
|
||||
|
||||
/********************************************************************
|
||||
* Flush TLBs and instruction caches of all CPUs inside the inner-shareable
|
||||
* domain, for all VMIDs
|
||||
|
|
|
@ -45,6 +45,26 @@ static phys_addr_t hyp_idmap_vector;
|
|||
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
|
||||
|
||||
#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
|
||||
#define kvm_pud_huge(_x) pud_huge(_x)
|
||||
|
||||
#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
|
||||
#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
|
||||
|
||||
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
|
||||
{
|
||||
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
|
||||
* @kvm: pointer to kvm structure.
|
||||
*
|
||||
* Interface to HYP function to flush all VM TLB entries
|
||||
*/
|
||||
void kvm_flush_remote_tlbs(struct kvm *kvm)
|
||||
{
|
||||
kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
|
||||
}
|
||||
|
||||
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
|
||||
{
|
||||
|
@ -58,6 +78,25 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
|
|||
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_dissolve_pmd() - clear and flush huge PMD entry
|
||||
* @kvm: pointer to kvm structure.
|
||||
* @addr: IPA
|
||||
* @pmd: pmd pointer for IPA
|
||||
*
|
||||
* Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
|
||||
* pages in the range dirty.
|
||||
*/
|
||||
static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
|
||||
{
|
||||
if (!kvm_pmd_huge(*pmd))
|
||||
return;
|
||||
|
||||
pmd_clear(pmd);
|
||||
kvm_tlb_flush_vmid_ipa(kvm, addr);
|
||||
put_page(virt_to_page(pmd));
|
||||
}
|
||||
|
||||
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
|
||||
int min, int max)
|
||||
{
|
||||
|
@ -767,10 +806,15 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
|
|||
}
|
||||
|
||||
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
|
||||
phys_addr_t addr, const pte_t *new_pte, bool iomap)
|
||||
phys_addr_t addr, const pte_t *new_pte,
|
||||
unsigned long flags)
|
||||
{
|
||||
pmd_t *pmd;
|
||||
pte_t *pte, old_pte;
|
||||
bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
|
||||
bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
|
||||
|
||||
VM_BUG_ON(logging_active && !cache);
|
||||
|
||||
/* Create stage-2 page table mapping - Levels 0 and 1 */
|
||||
pmd = stage2_get_pmd(kvm, cache, addr);
|
||||
|
@ -782,6 +826,13 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* While dirty page logging - dissolve huge PMD, then continue on to
|
||||
* allocate page.
|
||||
*/
|
||||
if (logging_active)
|
||||
stage2_dissolve_pmd(kvm, addr, pmd);
|
||||
|
||||
/* Create stage-2 page mappings - Level 2 */
|
||||
if (pmd_none(*pmd)) {
|
||||
if (!cache)
|
||||
|
@ -838,7 +889,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
|
|||
if (ret)
|
||||
goto out;
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
|
||||
ret = stage2_set_pte(kvm, &cache, addr, &pte,
|
||||
KVM_S2PTE_FLAG_IS_IOMAP);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@ -905,6 +957,151 @@ static bool kvm_is_device_pfn(unsigned long pfn)
|
|||
return !pfn_valid(pfn);
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_wp_ptes - write protect PMD range
|
||||
* @pmd: pointer to pmd entry
|
||||
* @addr: range start address
|
||||
* @end: range end address
|
||||
*/
|
||||
static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
pte_t *pte;
|
||||
|
||||
pte = pte_offset_kernel(pmd, addr);
|
||||
do {
|
||||
if (!pte_none(*pte)) {
|
||||
if (!kvm_s2pte_readonly(pte))
|
||||
kvm_set_s2pte_readonly(pte);
|
||||
}
|
||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_wp_pmds - write protect PUD range
|
||||
* @pud: pointer to pud entry
|
||||
* @addr: range start address
|
||||
* @end: range end address
|
||||
*/
|
||||
static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
pmd_t *pmd;
|
||||
phys_addr_t next;
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
|
||||
do {
|
||||
next = kvm_pmd_addr_end(addr, end);
|
||||
if (!pmd_none(*pmd)) {
|
||||
if (kvm_pmd_huge(*pmd)) {
|
||||
if (!kvm_s2pmd_readonly(pmd))
|
||||
kvm_set_s2pmd_readonly(pmd);
|
||||
} else {
|
||||
stage2_wp_ptes(pmd, addr, next);
|
||||
}
|
||||
}
|
||||
} while (pmd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_wp_puds - write protect PGD range
|
||||
* @pgd: pointer to pgd entry
|
||||
* @addr: range start address
|
||||
* @end: range end address
|
||||
*
|
||||
* Process PUD entries, for a huge PUD we cause a panic.
|
||||
*/
|
||||
static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
pud_t *pud;
|
||||
phys_addr_t next;
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
do {
|
||||
next = kvm_pud_addr_end(addr, end);
|
||||
if (!pud_none(*pud)) {
|
||||
/* TODO:PUD not supported, revisit later if supported */
|
||||
BUG_ON(kvm_pud_huge(*pud));
|
||||
stage2_wp_pmds(pud, addr, next);
|
||||
}
|
||||
} while (pud++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_wp_range() - write protect stage2 memory region range
|
||||
* @kvm: The KVM pointer
|
||||
* @addr: Start address of range
|
||||
* @end: End address of range
|
||||
*/
|
||||
static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
phys_addr_t next;
|
||||
|
||||
pgd = kvm->arch.pgd + pgd_index(addr);
|
||||
do {
|
||||
/*
|
||||
* Release kvm_mmu_lock periodically if the memory region is
|
||||
* large. Otherwise, we may see kernel panics with
|
||||
* CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
|
||||
* CONFIG_LOCKDEP. Additionally, holding the lock too long
|
||||
* will also starve other vCPUs.
|
||||
*/
|
||||
if (need_resched() || spin_needbreak(&kvm->mmu_lock))
|
||||
cond_resched_lock(&kvm->mmu_lock);
|
||||
|
||||
next = kvm_pgd_addr_end(addr, end);
|
||||
if (pgd_present(*pgd))
|
||||
stage2_wp_puds(pgd, addr, next);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
|
||||
* @kvm: The KVM pointer
|
||||
* @slot: The memory slot to write protect
|
||||
*
|
||||
* Called to start logging dirty pages after memory region
|
||||
* KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
|
||||
* all present PMD and PTEs are write protected in the memory region.
|
||||
* Afterwards read of dirty page log can be called.
|
||||
*
|
||||
* Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
|
||||
* serializing operations for VM memory regions.
|
||||
*/
|
||||
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
|
||||
{
|
||||
struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
|
||||
phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
|
||||
phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
stage2_wp_range(kvm, start, end);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
kvm_flush_remote_tlbs(kvm);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arch_mmu_write_protect_pt_masked() - write protect dirty pages
|
||||
* @kvm: The KVM pointer
|
||||
* @slot: The memory slot associated with mask
|
||||
* @gfn_offset: The gfn offset in memory slot
|
||||
* @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
|
||||
* slot to be write protected
|
||||
*
|
||||
* Walks bits set in mask write protects the associated pte's. Caller must
|
||||
* acquire kvm_mmu_lock.
|
||||
*/
|
||||
void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
|
||||
struct kvm_memory_slot *slot,
|
||||
gfn_t gfn_offset, unsigned long mask)
|
||||
{
|
||||
phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
|
||||
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
|
||||
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
|
||||
|
||||
stage2_wp_range(kvm, start, end);
|
||||
}
|
||||
|
||||
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
struct kvm_memory_slot *memslot, unsigned long hva,
|
||||
unsigned long fault_status)
|
||||
|
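[Editorial aside, not part of this patch: a worked example of the IPA range that kvm_arch_mmu_write_protect_pt_masked() above derives from one word of the dirty bitmap. Values are made up for illustration; __builtin_ctzl/__builtin_clzl stand in for the kernel's __ffs/__fls, and a 4K page size is assumed.]

#include <stdio.h>

int main(void)
{
	unsigned long base_gfn = 0x1000;	/* gfn corresponding to bit 0 of this word */
	unsigned long mask = 0x0000f0f0;	/* bits 4-7 and 12-15 are dirty */
	unsigned int page_shift = 12;

	/* __ffs(mask) == 4, __fls(mask) == 15 for this mask */
	unsigned long start = (base_gfn + __builtin_ctzl(mask)) << page_shift;
	unsigned long end = (base_gfn + (63 - __builtin_clzl(mask)) + 1) << page_shift;

	/* Prints 0x1004000 .. 0x1010000: only the span covering set bits is walked. */
	printf("write-protect IPA range: 0x%lx .. 0x%lx\n", start, end);
	return 0;
}
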
@ -919,6 +1116,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|||
pfn_t pfn;
|
||||
pgprot_t mem_type = PAGE_S2;
|
||||
bool fault_ipa_uncached;
|
||||
bool logging_active = memslot_is_logging(memslot);
|
||||
unsigned long flags = 0;
|
||||
|
||||
write_fault = kvm_is_write_fault(vcpu);
|
||||
if (fault_status == FSC_PERM && !write_fault) {
|
||||
|
@ -935,7 +1134,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (is_vm_hugetlb_page(vma)) {
|
||||
if (is_vm_hugetlb_page(vma) && !logging_active) {
|
||||
hugetlb = true;
|
||||
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
|
||||
} else {
|
||||
|
@ -976,12 +1175,30 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|||
if (is_error_pfn(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
if (kvm_is_device_pfn(pfn))
|
||||
if (kvm_is_device_pfn(pfn)) {
|
||||
mem_type = PAGE_S2_DEVICE;
|
||||
flags |= KVM_S2PTE_FLAG_IS_IOMAP;
|
||||
} else if (logging_active) {
|
||||
/*
|
||||
* Faults on pages in a memslot with logging enabled
|
||||
* should not be mapped with huge pages (it introduces churn
|
||||
* and performance degradation), so force a pte mapping.
|
||||
*/
|
||||
force_pte = true;
|
||||
flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
|
||||
|
||||
/*
|
||||
* Only actually map the page as writable if this was a write
|
||||
* fault.
|
||||
*/
|
||||
if (!write_fault)
|
||||
writable = false;
|
||||
}
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
if (mmu_notifier_retry(kvm, mmu_seq))
|
||||
goto out_unlock;
|
||||
|
||||
if (!hugetlb && !force_pte)
|
||||
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
|
||||
|
||||
|
@ -999,17 +1216,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|||
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
|
||||
} else {
|
||||
pte_t new_pte = pfn_pte(pfn, mem_type);
|
||||
|
||||
if (writable) {
|
||||
kvm_set_s2pte_writable(&new_pte);
|
||||
kvm_set_pfn_dirty(pfn);
|
||||
mark_page_dirty(kvm, gfn);
|
||||
}
|
||||
coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
|
||||
fault_ipa_uncached);
|
||||
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
|
||||
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
|
||||
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
|
||||
}
|
||||
|
||||
|
||||
out_unlock:
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
kvm_release_pfn_clean(pfn);
|
||||
|
@ -1159,7 +1376,14 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
|
|||
{
|
||||
pte_t *pte = (pte_t *)data;
|
||||
|
||||
stage2_set_pte(kvm, NULL, gpa, pte, false);
|
||||
/*
|
||||
* We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
|
||||
* flag clear because MMU notifiers will have unmapped a huge PMD before
|
||||
* calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
|
||||
* therefore stage2_set_pte() never needs to clear out a huge PMD
|
||||
* through this calling path.
|
||||
*/
|
||||
stage2_set_pte(kvm, NULL, gpa, pte, 0);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1292,6 +1516,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
|
|||
const struct kvm_memory_slot *old,
|
||||
enum kvm_mr_change change)
|
||||
{
|
||||
/*
|
||||
* At this point memslot has been committed and there is an
|
||||
* allocated dirty_bitmap[], dirty pages will be be tracked while the
|
||||
* memory slot is write protected.
|
||||
*/
|
||||
if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
|
||||
kvm_mmu_wp_memory_region(kvm, mem->slot);
|
||||
}
|
||||
|
||||
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
||||
|
@ -1304,7 +1535,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
bool writable = !(mem->flags & KVM_MEM_READONLY);
|
||||
int ret = 0;
|
||||
|
||||
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
|
||||
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
|
||||
change != KVM_MR_FLAGS_ONLY)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
@ -1355,6 +1587,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
|
||||
vm_start - vma->vm_start;
|
||||
|
||||
/* IO region dirty page logging not allowed */
|
||||
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
|
||||
vm_end - vm_start,
|
||||
writable);
|
||||
|
@ -1364,6 +1600,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
hva = vm_end;
|
||||
} while (hva < reg_end);
|
||||
|
||||
if (change == KVM_MR_FLAGS_ONLY)
|
||||
return ret;
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
if (ret)
|
||||
unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <asm/cputype.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_psci.h>
|
||||
#include <asm/kvm_host.h>
|
||||
|
||||
/*
|
||||
* This is an implementation of the Power State Coordination Interface
|
||||
|
@ -66,25 +67,17 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
|
|||
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
|
||||
{
|
||||
struct kvm *kvm = source_vcpu->kvm;
|
||||
struct kvm_vcpu *vcpu = NULL, *tmp;
|
||||
struct kvm_vcpu *vcpu = NULL;
|
||||
wait_queue_head_t *wq;
|
||||
unsigned long cpu_id;
|
||||
unsigned long context_id;
|
||||
unsigned long mpidr;
|
||||
phys_addr_t target_pc;
|
||||
int i;
|
||||
|
||||
cpu_id = *vcpu_reg(source_vcpu, 1);
|
||||
cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
|
||||
if (vcpu_mode_is_32bit(source_vcpu))
|
||||
cpu_id &= ~((u32) 0);
|
||||
|
||||
kvm_for_each_vcpu(i, tmp, kvm) {
|
||||
mpidr = kvm_vcpu_get_mpidr(tmp);
|
||||
if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
|
||||
vcpu = tmp;
|
||||
break;
|
||||
}
|
||||
}
|
||||
vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
|
||||
|
||||
/*
|
||||
* Make sure the caller requested a valid CPU and that the CPU is
|
||||
|
@ -155,7 +148,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
|
|||
* then ON else OFF
|
||||
*/
|
||||
kvm_for_each_vcpu(i, tmp, kvm) {
|
||||
mpidr = kvm_vcpu_get_mpidr(tmp);
|
||||
mpidr = kvm_vcpu_get_mpidr_aff(tmp);
|
||||
if (((mpidr & target_affinity_mask) == target_affinity) &&
|
||||
!tmp->arch.pause) {
|
||||
return PSCI_0_2_AFFINITY_LEVEL_ON;
|
||||
|
|
|
@ -140,19 +140,22 @@ TRACE_EVENT(kvm_emulate_cp15_imp,
|
|||
__entry->CRm, __entry->Op2)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_wfi,
|
||||
TP_PROTO(unsigned long vcpu_pc),
|
||||
TP_ARGS(vcpu_pc),
|
||||
TRACE_EVENT(kvm_wfx,
|
||||
TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
|
||||
TP_ARGS(vcpu_pc, is_wfe),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( unsigned long, vcpu_pc )
|
||||
__field( bool, is_wfe )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_pc = vcpu_pc;
|
||||
__entry->is_wfe = is_wfe;
|
||||
),
|
||||
|
||||
TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
|
||||
TP_printk("guest executed wf%c at: 0x%08lx",
|
||||
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_unmap_hva,
|
||||
|
|
|
@ -96,6 +96,7 @@
|
|||
#define ESR_ELx_COND_SHIFT (20)
|
||||
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
|
||||
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
|
||||
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <asm/types.h>
|
||||
|
|
|
@ -126,6 +126,7 @@ extern char __kvm_hyp_vector[];
|
|||
|
||||
extern void __kvm_flush_vm_context(void);
|
||||
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
|
||||
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
|
||||
|
||||
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
||||
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmio.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/cputype.h>
|
||||
|
||||
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
|
||||
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
|
||||
|
@ -128,6 +129,11 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
|
|||
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
|
||||
}
|
||||
|
||||
static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
|
||||
|
@ -189,9 +195,9 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
|
|||
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
|
||||
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_sys_reg(vcpu, MPIDR_EL1);
|
||||
return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
|
||||
}
|
||||
|
||||
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -59,6 +59,9 @@ struct kvm_arch {
|
|||
/* VTTBR value associated with above pgd and vmid */
|
||||
u64 vttbr;
|
||||
|
||||
/* The maximum number of vCPUs depends on the used GIC model */
|
||||
int max_vcpus;
|
||||
|
||||
/* Interrupt controller */
|
||||
struct vgic_dist vgic;
|
||||
|
||||
|
@ -199,6 +202,7 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
|
|||
|
||||
u64 kvm_call_hyp(void *hypfn, ...);
|
||||
void force_vm_exit(const cpumask_t *mask);
|
||||
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
|
||||
|
||||
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
int exception_index);
|
||||
|
@ -206,6 +210,8 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
int kvm_perf_init(void);
|
||||
int kvm_perf_teardown(void);
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
||||
phys_addr_t pgd_ptr,
|
||||
unsigned long hyp_stack_ptr,
|
||||
|
|
|
@ -40,6 +40,7 @@ struct kvm_exit_mmio {
|
|||
u8 data[8];
|
||||
u32 len;
|
||||
bool is_write;
|
||||
void *private;
|
||||
};
|
||||
|
||||
static inline void kvm_prepare_mmio(struct kvm_run *run,
|
||||
|
|
|
@ -118,6 +118,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
|
|||
pmd_val(*pmd) |= PMD_S2_RDWR;
|
||||
}
|
||||
|
||||
static inline void kvm_set_s2pte_readonly(pte_t *pte)
|
||||
{
|
||||
pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pte_readonly(pte_t *pte)
|
||||
{
|
||||
return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
|
||||
{
|
||||
pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
|
||||
{
|
||||
return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
|
||||
}
|
||||
|
||||
|
||||
#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
|
||||
#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
|
||||
#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
|
||||
|
|
|
@ -119,6 +119,7 @@
|
|||
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
|
||||
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
#define PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[2:1] */
|
||||
#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
/*
|
||||
|
|
|
@ -78,6 +78,13 @@ struct kvm_regs {
|
|||
#define KVM_VGIC_V2_DIST_SIZE 0x1000
|
||||
#define KVM_VGIC_V2_CPU_SIZE 0x2000
|
||||
|
||||
/* Supported VGICv3 address types */
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
|
||||
|
||||
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
|
||||
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
|
||||
|
||||
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
|
||||
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
|
||||
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
|
||||
|
@ -161,6 +168,8 @@ struct kvm_arch_memory_slot {
|
|||
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
|
||||
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
|
||||
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
|
||||
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
|
||||
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
|
||||
|
||||
/* KVM_IRQ_LINE irq field index values */
|
||||
#define KVM_ARM_IRQ_TYPE_SHIFT 24
|
||||
|
|
|
@ -140,6 +140,7 @@ int main(void)
|
|||
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
|
||||
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
|
||||
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
|
||||
DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
|
||||
DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
|
||||
DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
|
||||
DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
|
||||
|
|
|
@ -22,10 +22,12 @@ config KVM
|
|||
select PREEMPT_NOTIFIERS
|
||||
select ANON_INODES
|
||||
select HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
|
||||
select KVM_MMIO
|
||||
select KVM_ARM_HOST
|
||||
select KVM_ARM_VGIC
|
||||
select KVM_ARM_TIMER
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
---help---
|
||||
Support hosting virtualized guest machines.
|
||||
|
||||
|
|
|
@ -21,7 +21,9 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
|
|||
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
|
||||
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
|
||||
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
|
||||
|
|
|
@ -28,12 +28,18 @@
|
|||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/kvm_psci.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "trace.h"
|
||||
|
||||
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
|
||||
|
||||
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
int ret;
|
||||
|
||||
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
|
||||
kvm_vcpu_hvc_get_imm(vcpu));
|
||||
|
||||
ret = kvm_psci_call(vcpu);
|
||||
if (ret < 0) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
|
@ -63,10 +69,13 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||
*/
|
||||
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE)
|
||||
if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
|
||||
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
|
||||
kvm_vcpu_on_spin(vcpu);
|
||||
else
|
||||
} else {
|
||||
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
|
||||
kvm_vcpu_block(vcpu);
|
||||
}
|
||||
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
|
||||
|
|
|
@ -1031,6 +1031,28 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
|
|||
ret
|
||||
ENDPROC(__kvm_tlb_flush_vmid_ipa)
|
||||
|
||||
/**
|
||||
* void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
|
||||
* @struct kvm *kvm - pointer to kvm structure
|
||||
*
|
||||
* Invalidates all Stage 1 and 2 TLB entries for current VMID.
|
||||
*/
|
||||
ENTRY(__kvm_tlb_flush_vmid)
|
||||
dsb ishst
|
||||
|
||||
kern_hyp_va x0
|
||||
ldr x2, [x0, #KVM_VTTBR]
|
||||
msr vttbr_el2, x2
|
||||
isb
|
||||
|
||||
tlbi vmalls12e1is
|
||||
dsb ish
|
||||
isb
|
||||
|
||||
msr vttbr_el2, xzr
|
||||
ret
|
||||
ENDPROC(__kvm_tlb_flush_vmid)
|
||||
|
||||
ENTRY(__kvm_flush_vm_context)
|
||||
dsb ishst
|
||||
tlbi alle1is
|
||||
|
|
|
@ -168,6 +168,27 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
|
|||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Trap handler for the GICv3 SGI generation system register.
|
||||
* Forward the request to the VGIC emulation.
|
||||
* The cp15_64 code makes sure this automatically works
|
||||
* for both AArch64 and AArch32 accesses.
|
||||
*/
|
||||
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
if (!p->is_write)
|
||||
return read_from_write_only(vcpu, p);
|
||||
|
||||
val = *vcpu_reg(vcpu, p->Rt);
|
||||
vgic_v3_dispatch_sgi(vcpu, val);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
|
@@ -255,10 +276,19 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
+	u64 mpidr;
+
 	/*
-	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+	 * Map the vcpu_id into the first three affinity level fields of
+	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
+	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
+	 * of the GICv3 to be able to address each CPU directly when
+	 * sending IPIs.
	 */
-	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
+	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
 }
 
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */

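[Editorial aside, not part of this patch: how the new reset_mpidr() packing behaves for a concrete vcpu_id. MPIDR_LEVEL_SHIFT(n) is 8*n for the levels used here, so each affinity level occupies a byte field.]

#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_SHIFT(level)	((level) * 8)

int main(void)
{
	uint32_t vcpu_id = 21;			/* example VCPU number */
	uint64_t mpidr;

	mpidr  = (uint64_t)(vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);		/* Aff0 = 5 */
	mpidr |= (uint64_t)((vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);	/* Aff1 = 1 */
	mpidr |= (uint64_t)((vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);	/* Aff2 = 0 */
	mpidr |= 1ULL << 31;			/* RES1 bit, as in the kernel code */

	/* Prints 0x80000105: 16 VCPUs per Aff0 group, then Aff1, then Aff2. */
	printf("MPIDR_EL1 for vcpu %u = 0x%llx\n", vcpu_id,
	       (unsigned long long)mpidr);
	return 0;
}
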
@ -428,6 +458,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
|
||||
NULL, reset_val, VBAR_EL1, 0 },
|
||||
|
||||
/* ICC_SGI1R_EL1 */
|
||||
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
|
||||
access_gic_sgi },
|
||||
/* ICC_SRE_EL1 */
|
||||
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
|
||||
trap_raz_wi },
|
||||
|
@ -660,6 +693,8 @@ static const struct sys_reg_desc cp14_64_regs[] = {
|
|||
* register).
|
||||
*/
|
||||
static const struct sys_reg_desc cp15_regs[] = {
|
||||
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
|
||||
|
||||
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
|
||||
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
|
||||
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
|
||||
|
@ -707,6 +742,7 @@ static const struct sys_reg_desc cp15_regs[] = {
|
|||
|
||||
static const struct sys_reg_desc cp15_64_regs[] = {
|
||||
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
|
||||
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
|
||||
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
|
||||
};
|
||||
|
||||
|
|
55
arch/arm64/kvm/trace.h
Normal file
55
arch/arm64/kvm/trace.h
Normal file
|
@ -0,0 +1,55 @@
|
|||
#if !defined(_TRACE_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_ARM64_KVM_H
|
||||
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM kvm
|
||||
|
||||
TRACE_EVENT(kvm_wfx_arm64,
|
||||
TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
|
||||
TP_ARGS(vcpu_pc, is_wfe),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long, vcpu_pc)
|
||||
__field(bool, is_wfe)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_pc = vcpu_pc;
|
||||
__entry->is_wfe = is_wfe;
|
||||
),
|
||||
|
||||
TP_printk("guest executed wf%c at: 0x%08lx",
|
||||
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_hvc_arm64,
|
||||
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
|
||||
TP_ARGS(vcpu_pc, r0, imm),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long, vcpu_pc)
|
||||
__field(unsigned long, r0)
|
||||
__field(unsigned long, imm)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_pc = vcpu_pc;
|
||||
__entry->r0 = r0;
|
||||
__entry->imm = imm;
|
||||
),
|
||||
|
||||
TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
|
||||
__entry->vcpu_pc, __entry->r0, __entry->imm)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_ARM64_KVM_H */
|
||||
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#undef TRACE_INCLUDE_FILE
|
||||
#define TRACE_INCLUDE_FILE trace
|
||||
|
||||
/* This part must be outside protection */
|
||||
#include <trace/define_trace.h>
|
|
@ -148,17 +148,18 @@
|
|||
* x0: Register pointing to VCPU struct
|
||||
*/
|
||||
.macro restore_vgic_v3_state
|
||||
// Disable SRE_EL1 access. Necessary, otherwise
|
||||
// ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
|
||||
msr_s ICC_SRE_EL1, xzr
|
||||
isb
|
||||
|
||||
// Compute the address of struct vgic_cpu
|
||||
add x3, x0, #VCPU_VGIC_CPU
|
||||
|
||||
// Restore all interesting registers
|
||||
ldr w4, [x3, #VGIC_V3_CPU_HCR]
|
||||
ldr w5, [x3, #VGIC_V3_CPU_VMCR]
|
||||
ldr w25, [x3, #VGIC_V3_CPU_SRE]
|
||||
|
||||
msr_s ICC_SRE_EL1, x25
|
||||
|
||||
// make sure SRE is valid before writing the other registers
|
||||
isb
|
||||
|
||||
msr_s ICH_HCR_EL2, x4
|
||||
msr_s ICH_VMCR_EL2, x5
|
||||
|
@ -244,9 +245,12 @@
|
|||
dsb sy
|
||||
|
||||
// Prevent the guest from touching the GIC system registers
|
||||
// if SRE isn't enabled for GICv3 emulation
|
||||
cbnz x25, 1f
|
||||
mrs_s x5, ICC_SRE_EL2
|
||||
and x5, x5, #~ICC_SRE_EL2_ENABLE
|
||||
msr_s ICC_SRE_EL2, x5
|
||||
1:
|
||||
.endm
|
||||
|
||||
ENTRY(__save_vgic_v3_state)
|
||||
|
|
|
@ -835,9 +835,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
|
|||
|
||||
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
|
||||
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
|
||||
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
|
||||
struct kvm_memory_slot *slot,
|
||||
gfn_t gfn_offset, unsigned long mask);
|
||||
void kvm_mmu_zap_all(struct kvm *kvm);
|
||||
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
|
||||
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
|
||||
|
|
|
@ -39,6 +39,7 @@ config KVM
|
|||
select PERF_EVENTS
|
||||
select HAVE_KVM_MSI
|
||||
select HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
select KVM_VFIO
|
||||
---help---
|
||||
Support hosting fully virtualized guest machines using hardware
|
||||
|
|
|
@ -1216,7 +1216,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
|
|||
}
|
||||
|
||||
/**
|
||||
* kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
|
||||
* kvm_arch_mmu_write_protect_pt_masked - write protect selected PT level pages
|
||||
* @kvm: kvm instance
|
||||
* @slot: slot to protect
|
||||
* @gfn_offset: start of the BITS_PER_LONG pages we care about
|
||||
|
@ -1225,7 +1225,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
|
|||
* Used when we do not need to care about huge page mappings: e.g. during dirty
|
||||
* logging we do not have any such mappings.
|
||||
*/
|
||||
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
|
||||
void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
|
||||
struct kvm_memory_slot *slot,
|
||||
gfn_t gfn_offset, unsigned long mask)
|
||||
{
|
||||
|
|
|
@ -3759,83 +3759,37 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
|
|||
* @kvm: kvm instance
|
||||
* @log: slot id and address to which we copy the log
|
||||
*
|
||||
* We need to keep it in mind that VCPU threads can write to the bitmap
|
||||
* concurrently. So, to avoid losing data, we keep the following order for
|
||||
* each bit:
|
||||
* Steps 1-4 below provide general overview of dirty page logging. See
|
||||
* kvm_get_dirty_log_protect() function description for additional details.
|
||||
*
|
||||
* We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
|
||||
* always flush the TLB (step 4) even if previous step failed and the dirty
|
||||
* bitmap may be corrupt. Regardless of previous outcome the KVM logging API
|
||||
* does not preclude user space subsequent dirty log read. Flushing TLB ensures
|
||||
* writes will be marked dirty for next log read.
|
||||
*
|
||||
* 1. Take a snapshot of the bit and clear it if needed.
|
||||
* 2. Write protect the corresponding page.
|
||||
* 3. Flush TLB's if needed.
|
||||
* 4. Copy the snapshot to the userspace.
|
||||
*
|
||||
* Between 2 and 3, the guest may write to the page using the remaining TLB
|
||||
* entry. This is not a problem because the page will be reported dirty at
|
||||
* step 4 using the snapshot taken before and step 3 ensures that successive
|
||||
* writes will be logged for the next call.
|
||||
* 3. Copy the snapshot to the userspace.
|
||||
* 4. Flush TLB's if needed.
|
||||
*/
|
||||
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
|
||||
{
|
||||
int r;
|
||||
struct kvm_memory_slot *memslot;
|
||||
unsigned long n, i;
|
||||
unsigned long *dirty_bitmap;
|
||||
unsigned long *dirty_bitmap_buffer;
|
||||
bool is_dirty = false;
|
||||
int r;
|
||||
|
||||
mutex_lock(&kvm->slots_lock);
|
||||
|
||||
r = -EINVAL;
|
||||
if (log->slot >= KVM_USER_MEM_SLOTS)
|
||||
goto out;
|
||||
|
||||
memslot = id_to_memslot(kvm->memslots, log->slot);
|
||||
|
||||
dirty_bitmap = memslot->dirty_bitmap;
|
||||
r = -ENOENT;
|
||||
if (!dirty_bitmap)
|
||||
goto out;
|
||||
|
||||
n = kvm_dirty_bitmap_bytes(memslot);
|
||||
|
||||
dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
|
||||
memset(dirty_bitmap_buffer, 0, n);
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
|
||||
for (i = 0; i < n / sizeof(long); i++) {
|
||||
unsigned long mask;
|
||||
gfn_t offset;
|
||||
|
||||
if (!dirty_bitmap[i])
|
||||
continue;
|
||||
|
||||
is_dirty = true;
|
||||
|
||||
mask = xchg(&dirty_bitmap[i], 0);
|
||||
dirty_bitmap_buffer[i] = mask;
|
||||
|
||||
offset = i * BITS_PER_LONG;
|
||||
kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
|
||||
}
|
||||
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
|
||||
/* See the comments in kvm_mmu_slot_remove_write_access(). */
|
||||
lockdep_assert_held(&kvm->slots_lock);
|
||||
r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
|
||||
|
||||
/*
|
||||
* All the TLBs can be flushed out of mmu lock, see the comments in
|
||||
* kvm_mmu_slot_remove_write_access().
|
||||
*/
|
||||
lockdep_assert_held(&kvm->slots_lock);
|
||||
if (is_dirty)
|
||||
kvm_flush_remote_tlbs(kvm);
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
|
||||
goto out;
|
||||
|
||||
r = 0;
|
||||
out:
|
||||
mutex_unlock(&kvm->slots_lock);
|
||||
return r;
|
||||
}
|
||||
|
|
|
@@ -481,15 +481,19 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 	return tlist;
 }
 
+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
+	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
+		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
+
 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
 {
 	u64 val;
 
-	val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
-	       MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
-	       irq << 24 |
-	       MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
-	       tlist);
+	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
+	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
+	       irq << ICC_SGI1R_SGI_ID_SHIFT |
+	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
 
 	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
 	gic_write_sgi1r(val);

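[Editorial aside, not part of this patch: composing an ICC_SGI1R_EL1 value the way the rewritten gic_send_sgi() does, with made-up affinity values. The shift constants mirror the ones this series adds to the GICv3 header.]

#include <stdio.h>
#include <stdint.h>

#define ICC_SGI1R_TARGET_LIST_SHIFT	0
#define ICC_SGI1R_AFFINITY_1_SHIFT	16
#define ICC_SGI1R_SGI_ID_SHIFT		24
#define ICC_SGI1R_AFFINITY_2_SHIFT	32
#define ICC_SGI1R_AFFINITY_3_SHIFT	48

int main(void)
{
	uint64_t aff3 = 0, aff2 = 1, aff1 = 2;	/* target cluster 0.1.2.x */
	uint64_t irq = 5;			/* SGI number */
	uint64_t tlist = 0x9;			/* CPUs 0 and 3 within that cluster */

	uint64_t val = (aff3 << ICC_SGI1R_AFFINITY_3_SHIFT) |
		       (aff2 << ICC_SGI1R_AFFINITY_2_SHIFT) |
		       (irq << ICC_SGI1R_SGI_ID_SHIFT) |
		       (aff1 << ICC_SGI1R_AFFINITY_1_SHIFT) |
		       (tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	/* Prints 0x105020009 for this example. */
	printf("ICC_SGI1R_EL1 = 0x%llx\n", (unsigned long long)val);
	return 0;
}
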
@ -33,10 +33,11 @@
|
|||
#define VGIC_V2_MAX_LRS (1 << 6)
|
||||
#define VGIC_V3_MAX_LRS 16
|
||||
#define VGIC_MAX_IRQS 1024
|
||||
#define VGIC_V2_MAX_CPUS 8
|
||||
|
||||
/* Sanity checks... */
|
||||
#if (KVM_MAX_VCPUS > 8)
|
||||
#error Invalid number of CPU interfaces
|
||||
#if (KVM_MAX_VCPUS > 255)
|
||||
#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
|
||||
#endif
|
||||
|
||||
#if (VGIC_NR_IRQS_LEGACY & 31)
|
||||
|
@ -132,6 +133,18 @@ struct vgic_params {
|
|||
unsigned int maint_irq;
|
||||
/* Virtual control interface base address */
|
||||
void __iomem *vctrl_base;
|
||||
int max_gic_vcpus;
|
||||
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
|
||||
bool can_emulate_gicv2;
|
||||
};
|
||||
|
||||
struct vgic_vm_ops {
|
||||
bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
|
||||
struct kvm_exit_mmio *);
|
||||
bool (*queue_sgi)(struct kvm_vcpu *, int irq);
|
||||
void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
|
||||
int (*init_model)(struct kvm *);
|
||||
int (*map_resources)(struct kvm *, const struct vgic_params *);
|
||||
};
|
||||
|
||||
struct vgic_dist {
|
||||
|
@ -140,6 +153,9 @@ struct vgic_dist {
|
|||
bool in_kernel;
|
||||
bool ready;
|
||||
|
||||
/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
|
||||
u32 vgic_model;
|
||||
|
||||
int nr_cpus;
|
||||
int nr_irqs;
|
||||
|
||||
|
@ -148,7 +164,11 @@ struct vgic_dist {
|
|||
|
||||
/* Distributor and vcpu interface mapping in the guest */
|
||||
phys_addr_t vgic_dist_base;
|
||||
phys_addr_t vgic_cpu_base;
|
||||
/* GICv2 and GICv3 use different mapped register blocks */
|
||||
union {
|
||||
phys_addr_t vgic_cpu_base;
|
||||
phys_addr_t vgic_redist_base;
|
||||
};
|
||||
|
||||
/* Distributor enabled */
|
||||
u32 enabled;
|
||||
|
@ -210,8 +230,13 @@ struct vgic_dist {
|
|||
*/
|
||||
struct vgic_bitmap *irq_spi_target;
|
||||
|
||||
/* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
|
||||
u32 *irq_spi_mpidr;
|
||||
|
||||
/* Bitmap indicating which CPU has something pending */
|
||||
unsigned long *irq_pending_on_cpu;
|
||||
|
||||
struct vgic_vm_ops vm_ops;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
@ -229,6 +254,7 @@ struct vgic_v3_cpu_if {
|
|||
#ifdef CONFIG_ARM_GIC_V3
|
||||
u32 vgic_hcr;
|
||||
u32 vgic_vmcr;
|
||||
u32 vgic_sre; /* Restored only, change ignored */
|
||||
u32 vgic_misr; /* Saved only */
|
||||
u32 vgic_eisr; /* Saved only */
|
||||
u32 vgic_elrsr; /* Saved only */
|
||||
|
@ -275,13 +301,15 @@ struct kvm_exit_mmio;
|
|||
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
|
||||
int kvm_vgic_hyp_init(void);
|
||||
int kvm_vgic_map_resources(struct kvm *kvm);
|
||||
int kvm_vgic_create(struct kvm *kvm);
|
||||
int kvm_vgic_get_max_vcpus(void);
|
||||
int kvm_vgic_create(struct kvm *kvm, u32 type);
|
||||
void kvm_vgic_destroy(struct kvm *kvm);
|
||||
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
|
||||
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
|
||||
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
|
||||
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
|
||||
bool level);
|
||||
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
|
||||
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
|
||||
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
struct kvm_exit_mmio *mmio);
|
||||
|
@ -327,7 +355,7 @@ static inline int kvm_vgic_map_resources(struct kvm *kvm)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int kvm_vgic_create(struct kvm *kvm)
|
||||
static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -379,6 +407,11 @@ static inline bool vgic_ready(struct kvm *kvm)
|
|||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline int kvm_vgic_get_max_vcpus(void)
|
||||
{
|
||||
return KVM_MAX_VCPUS;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#define GICD_SETSPI_SR 0x0050
|
||||
#define GICD_CLRSPI_SR 0x0058
|
||||
#define GICD_SEIR 0x0068
|
||||
#define GICD_IGROUPR 0x0080
|
||||
#define GICD_ISENABLER 0x0100
|
||||
#define GICD_ICENABLER 0x0180
|
||||
#define GICD_ISPENDR 0x0200
|
||||
|
@ -41,14 +42,37 @@
|
|||
#define GICD_ICACTIVER 0x0380
|
||||
#define GICD_IPRIORITYR 0x0400
|
||||
#define GICD_ICFGR 0x0C00
|
||||
#define GICD_IGRPMODR 0x0D00
|
||||
#define GICD_NSACR 0x0E00
|
||||
#define GICD_IROUTER 0x6000
|
||||
#define GICD_IDREGS 0xFFD0
|
||||
#define GICD_PIDR2 0xFFE8
|
||||
|
||||
/*
|
||||
* Those registers are actually from GICv2, but the spec demands that they
|
||||
* are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
|
||||
*/
|
||||
#define GICD_ITARGETSR 0x0800
|
||||
#define GICD_SGIR 0x0F00
|
||||
#define GICD_CPENDSGIR 0x0F10
|
||||
#define GICD_SPENDSGIR 0x0F20
|
||||
|
||||
#define GICD_CTLR_RWP (1U << 31)
|
||||
#define GICD_CTLR_DS (1U << 6)
|
||||
#define GICD_CTLR_ARE_NS (1U << 4)
|
||||
#define GICD_CTLR_ENABLE_G1A (1U << 1)
|
||||
#define GICD_CTLR_ENABLE_G1 (1U << 0)
|
||||
|
||||
/*
|
||||
* In systems with a single security state (what we emulate in KVM)
|
||||
* the meaning of the interrupt group enable bits is slightly different
|
||||
*/
|
||||
#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
|
||||
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
|
||||
|
||||
#define GICD_TYPER_LPIS (1U << 17)
|
||||
#define GICD_TYPER_MBIS (1U << 16)
|
||||
|
||||
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
|
||||
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
|
||||
#define GICD_TYPER_LPIS (1U << 17)
|
||||
|
@ -60,6 +84,8 @@
|
|||
#define GIC_PIDR2_ARCH_GICv3 0x30
|
||||
#define GIC_PIDR2_ARCH_GICv4 0x40
|
||||
|
||||
#define GIC_V3_DIST_SIZE 0x10000
|
||||
|
||||
/*
|
||||
* Re-Distributor registers, offsets from RD_base
|
||||
*/
|
||||
|
@@ -78,6 +104,7 @@
|
|||
#define GICR_SYNCR 0x00C0
|
||||
#define GICR_MOVLPIR 0x0100
|
||||
#define GICR_MOVALLR 0x0110
|
||||
#define GICR_IDREGS GICD_IDREGS
|
||||
#define GICR_PIDR2 GICD_PIDR2
|
||||
|
||||
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
|
||||
|
@@ -104,6 +131,7 @@
|
|||
/*
|
||||
* Re-Distributor registers, offsets from SGI_base
|
||||
*/
|
||||
#define GICR_IGROUPR0 GICD_IGROUPR
|
||||
#define GICR_ISENABLER0 GICD_ISENABLER
|
||||
#define GICR_ICENABLER0 GICD_ICENABLER
|
||||
#define GICR_ISPENDR0 GICD_ISPENDR
|
||||
|
@@ -112,11 +140,15 @@
|
|||
#define GICR_ICACTIVER0 GICD_ICACTIVER
|
||||
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
|
||||
#define GICR_ICFGR0 GICD_ICFGR
|
||||
#define GICR_IGRPMODR0 GICD_IGRPMODR
|
||||
#define GICR_NSACR GICD_NSACR
|
||||
|
||||
#define GICR_TYPER_PLPIS (1U << 0)
|
||||
#define GICR_TYPER_VLPIS (1U << 1)
|
||||
#define GICR_TYPER_LAST (1U << 4)
|
||||
|
||||
#define GIC_V3_REDIST_SIZE 0x20000
|
||||
|
||||
#define LPI_PROP_GROUP1 (1 << 1)
|
||||
#define LPI_PROP_ENABLED (1 << 0)
|
||||
|
||||
|
@@ -248,6 +280,18 @@
|
|||
#define ICC_SRE_EL2_SRE (1 << 0)
|
||||
#define ICC_SRE_EL2_ENABLE (1 << 3)
|
||||
|
||||
#define ICC_SGI1R_TARGET_LIST_SHIFT 0
|
||||
#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
|
||||
#define ICC_SGI1R_AFFINITY_1_SHIFT 16
|
||||
#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
|
||||
#define ICC_SGI1R_SGI_ID_SHIFT 24
|
||||
#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
|
||||
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
|
||||
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
|
||||
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
|
||||
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
|
||||
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
|
||||
|
||||
/*
|
||||
* System register definitions
|
||||
*/
|
||||
|
|
|
@@ -611,6 +611,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
|
|||
|
||||
int kvm_get_dirty_log(struct kvm *kvm,
|
||||
struct kvm_dirty_log *log, int *is_dirty);
|
||||
|
||||
int kvm_get_dirty_log_protect(struct kvm *kvm,
|
||||
struct kvm_dirty_log *log, bool *is_dirty);
|
||||
|
||||
void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
|
||||
struct kvm_memory_slot *slot,
|
||||
gfn_t gfn_offset,
|
||||
unsigned long mask);
|
||||
|
||||
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
|
||||
struct kvm_dirty_log *log);
|
||||
|
||||
|
@@ -1042,6 +1051,8 @@ void kvm_unregister_device_ops(u32 type);
|
|||
|
||||
extern struct kvm_device_ops kvm_mpic_ops;
|
||||
extern struct kvm_device_ops kvm_xics_ops;
|
||||
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
|
||||
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
|
||||
|
|
|
@@ -952,6 +952,8 @@ enum kvm_device_type {
|
|||
#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
|
||||
KVM_DEV_TYPE_FLIC,
|
||||
#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
|
||||
KVM_DEV_TYPE_ARM_VGIC_V3,
|
||||
#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
|
||||
KVM_DEV_TYPE_MAX,
|
||||
};
|
||||
|
||||
|
|
|
@@ -37,3 +37,9 @@ config HAVE_KVM_CPU_RELAX_INTERCEPT
|
|||
|
||||
config KVM_VFIO
|
||||
bool
|
||||
|
||||
config HAVE_KVM_ARCH_TLB_FLUSH_ALL
|
||||
bool
|
||||
|
||||
config KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
bool
|
||||
|
|
virt/kvm/arm/vgic-v2-emul.c (new file, 847 lines)

@@ -0,0 +1,847 @@
|
|||
/*
|
||||
* Contains GICv2 specific emulation code, was in vgic.c before.
|
||||
*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
#include "vgic.h"
|
||||
|
||||
#define GICC_ARCH_VERSION_V2 0x2
|
||||
|
||||
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
|
||||
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
|
||||
{
|
||||
return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
|
||||
}
|
||||
|
||||
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio, phys_addr_t offset)
|
||||
{
|
||||
u32 reg;
|
||||
u32 word_offset = offset & 3;
|
||||
|
||||
switch (offset & ~3) {
|
||||
case 0: /* GICD_CTLR */
|
||||
reg = vcpu->kvm->arch.vgic.enabled;
|
||||
vgic_reg_access(mmio, &reg, word_offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
|
||||
if (mmio->is_write) {
|
||||
vcpu->kvm->arch.vgic.enabled = reg & 1;
|
||||
vgic_update_state(vcpu->kvm);
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
|
||||
case 4: /* GICD_TYPER */
|
||||
reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
|
||||
reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
|
||||
vgic_reg_access(mmio, &reg, word_offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
break;
|
||||
|
||||
case 8: /* GICD_IIDR */
|
||||
reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
|
||||
vgic_reg_access(mmio, &reg, word_offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
break;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
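For illustration, here is a minimal standalone sketch (not part of the patch) of the GICD_TYPER encoding that handle_mmio_misc() reports to the guest: bits [7:5] hold the number of implemented CPU interfaces minus one, and bits [4:0] hold the number of 32-interrupt blocks minus one. The vcpu and IRQ counts below are made-up example values.

#include <stdio.h>

int main(void)
{
	unsigned int online_vcpus = 4;		/* assumed example value */
	unsigned int nr_irqs = 128;		/* assumed example value */
	unsigned int typer;

	typer = (online_vcpus - 1) << 5;	/* CPUNumber field, bits [7:5] */
	typer |= (nr_irqs / 32) - 1;		/* ITLinesNumber field, bits [4:0] */

	printf("GICD_TYPER = 0x%02x\n", typer);	/* prints 0x63 for these values */
	return 0;
}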
|
||||
|
||||
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
|
||||
vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
|
||||
}
|
||||
|
||||
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
|
||||
vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
|
||||
}
|
||||
|
||||
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
|
||||
vcpu->vcpu_id);
|
||||
}
|
||||
|
||||
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
|
||||
vcpu->vcpu_id);
|
||||
}
|
||||
|
||||
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
|
||||
vcpu->vcpu_id, offset);
|
||||
vgic_reg_access(mmio, reg, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
|
||||
return false;
|
||||
}
|
||||
|
||||
#define GICD_ITARGETSR_SIZE 32
|
||||
#define GICD_CPUTARGETS_BITS 8
|
||||
#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
|
||||
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
int i;
|
||||
u32 val = 0;
|
||||
|
||||
irq -= VGIC_NR_PRIVATE_IRQS;
|
||||
|
||||
for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
|
||||
val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
struct kvm_vcpu *vcpu;
|
||||
int i, c;
|
||||
unsigned long *bmap;
|
||||
u32 target;
|
||||
|
||||
irq -= VGIC_NR_PRIVATE_IRQS;
|
||||
|
||||
/*
|
||||
* Pick the LSB in each byte. This ensures we target exactly
|
||||
* one vcpu per IRQ. If the byte is null, assume we target
|
||||
* CPU0.
|
||||
*/
|
||||
for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
|
||||
int shift = i * GICD_CPUTARGETS_BITS;
|
||||
|
||||
target = ffs((val >> shift) & 0xffU);
|
||||
target = target ? (target - 1) : 0;
|
||||
dist->irq_spi_cpu[irq + i] = target;
|
||||
kvm_for_each_vcpu(c, vcpu, kvm) {
|
||||
bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
|
||||
if (c == target)
|
||||
set_bit(irq + i, bmap);
|
||||
else
|
||||
clear_bit(irq + i, bmap);
|
||||
}
|
||||
}
|
||||
}
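The byte-wise "pick the lowest set bit" policy described in the comment above can be shown with a short standalone sketch (not part of the patch); the register value below is a made-up example of a guest write to one GICD_ITARGETSR word covering four SPIs.

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x04010208;	/* hypothetical ITARGETSR write */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int byte = (val >> (i * 8)) & 0xff;
		/* lowest set bit selects the target vcpu; an empty byte means vcpu 0 */
		int target = byte ? __builtin_ctz(byte) : 0;

		printf("IRQ %d of this word -> vcpu %d\n", i, target);
	}
	return 0;
}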
|
||||
|
||||
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
/* We treat the banked interrupts targets as read-only */
|
||||
if (offset < 32) {
|
||||
u32 roreg;
|
||||
|
||||
roreg = 1 << vcpu->vcpu_id;
|
||||
roreg |= roreg << 8;
|
||||
roreg |= roreg << 16;
|
||||
|
||||
vgic_reg_access(mmio, &roreg, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
|
||||
vgic_reg_access(mmio, &reg, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
|
||||
if (mmio->is_write) {
|
||||
vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
|
||||
vgic_update_state(vcpu->kvm);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio, phys_addr_t offset)
|
||||
{
|
||||
u32 *reg;
|
||||
|
||||
reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
|
||||
vcpu->vcpu_id, offset >> 1);
|
||||
|
||||
return vgic_handle_cfg_reg(reg, mmio, offset);
|
||||
}
|
||||
|
||||
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio, phys_addr_t offset)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
vgic_reg_access(mmio, &reg, offset,
|
||||
ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
|
||||
if (mmio->is_write) {
|
||||
vgic_dispatch_sgi(vcpu, reg);
|
||||
vgic_update_state(vcpu->kvm);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
|
||||
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
int sgi;
|
||||
int min_sgi = (offset & ~0x3);
|
||||
int max_sgi = min_sgi + 3;
|
||||
int vcpu_id = vcpu->vcpu_id;
|
||||
u32 reg = 0;
|
||||
|
||||
/* Copy source SGIs from distributor side */
|
||||
for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
|
||||
u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
|
||||
|
||||
reg |= ((u32)sources) << (8 * (sgi - min_sgi));
|
||||
}
|
||||
|
||||
mmio_data_write(mmio, ~0, reg);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset, bool set)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
int sgi;
|
||||
int min_sgi = (offset & ~0x3);
|
||||
int max_sgi = min_sgi + 3;
|
||||
int vcpu_id = vcpu->vcpu_id;
|
||||
u32 reg;
|
||||
bool updated = false;
|
||||
|
||||
reg = mmio_data_read(mmio, ~0);
|
||||
|
||||
/* Clear pending SGIs on the distributor */
|
||||
for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
|
||||
u8 mask = reg >> (8 * (sgi - min_sgi));
|
||||
u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
|
||||
|
||||
if (set) {
|
||||
if ((*src & mask) != mask)
|
||||
updated = true;
|
||||
*src |= mask;
|
||||
} else {
|
||||
if (*src & mask)
|
||||
updated = true;
|
||||
*src &= ~mask;
|
||||
}
|
||||
}
|
||||
|
||||
if (updated)
|
||||
vgic_update_state(vcpu->kvm);
|
||||
|
||||
return updated;
|
||||
}
|
||||
|
||||
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
if (!mmio->is_write)
|
||||
return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
|
||||
else
|
||||
return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
|
||||
}
|
||||
|
||||
static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
if (!mmio->is_write)
|
||||
return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
|
||||
else
|
||||
return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
|
||||
}
|
||||
|
||||
static const struct kvm_mmio_range vgic_dist_ranges[] = {
|
||||
{
|
||||
.base = GIC_DIST_CTRL,
|
||||
.len = 12,
|
||||
.bits_per_irq = 0,
|
||||
.handle_mmio = handle_mmio_misc,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_IGROUP,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_ENABLE_SET,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_set_enable_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_ENABLE_CLEAR,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_clear_enable_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_PENDING_SET,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_set_pending_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_PENDING_CLEAR,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_clear_pending_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_ACTIVE_SET,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_ACTIVE_CLEAR,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_PRI,
|
||||
.len = VGIC_MAX_IRQS,
|
||||
.bits_per_irq = 8,
|
||||
.handle_mmio = handle_mmio_priority_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_TARGET,
|
||||
.len = VGIC_MAX_IRQS,
|
||||
.bits_per_irq = 8,
|
||||
.handle_mmio = handle_mmio_target_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_CONFIG,
|
||||
.len = VGIC_MAX_IRQS / 4,
|
||||
.bits_per_irq = 2,
|
||||
.handle_mmio = handle_mmio_cfg_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_SOFTINT,
|
||||
.len = 4,
|
||||
.handle_mmio = handle_mmio_sgi_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_SGI_PENDING_CLEAR,
|
||||
.len = VGIC_NR_SGIS,
|
||||
.handle_mmio = handle_mmio_sgi_clear,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_SGI_PENDING_SET,
|
||||
.len = VGIC_NR_SGIS,
|
||||
.handle_mmio = handle_mmio_sgi_set,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
struct kvm_exit_mmio *mmio)
|
||||
{
|
||||
unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
|
||||
|
||||
if (!is_in_range(mmio->phys_addr, mmio->len, base,
|
||||
KVM_VGIC_V2_DIST_SIZE))
|
||||
return false;
|
||||
|
||||
/* GICv2 does not support accesses wider than 32 bits */
|
||||
if (mmio->len > 4) {
|
||||
kvm_inject_dabt(vcpu, mmio->phys_addr);
|
||||
return true;
|
||||
}
|
||||
|
||||
return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
|
||||
}
|
||||
|
||||
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
int nrcpus = atomic_read(&kvm->online_vcpus);
|
||||
u8 target_cpus;
|
||||
int sgi, mode, c, vcpu_id;
|
||||
|
||||
vcpu_id = vcpu->vcpu_id;
|
||||
|
||||
sgi = reg & 0xf;
|
||||
target_cpus = (reg >> 16) & 0xff;
|
||||
mode = (reg >> 24) & 3;
|
||||
|
||||
switch (mode) {
|
||||
case 0:
|
||||
if (!target_cpus)
|
||||
return;
|
||||
break;
|
||||
|
||||
case 1:
|
||||
target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
|
||||
break;
|
||||
|
||||
case 2:
|
||||
target_cpus = 1 << vcpu_id;
|
||||
break;
|
||||
}
|
||||
|
||||
kvm_for_each_vcpu(c, vcpu, kvm) {
|
||||
if (target_cpus & 1) {
|
||||
/* Flag the SGI as pending */
|
||||
vgic_dist_irq_set_pending(vcpu, sgi);
|
||||
*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
|
||||
kvm_debug("SGI%d from CPU%d to CPU%d\n",
|
||||
sgi, vcpu_id, c);
|
||||
}
|
||||
|
||||
target_cpus >>= 1;
|
||||
}
|
||||
}
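The GICD_SGIR layout decoded at the top of vgic_dispatch_sgi() can be illustrated with a small standalone sketch (not part of the patch); the register value is a made-up example of a guest write requesting SGI 3 for an explicit target list.

#include <stdio.h>

int main(void)
{
	unsigned int reg = (0u << 24) | (0x06u << 16) | 3u;	/* hypothetical GICD_SGIR write */
	unsigned int sgi = reg & 0xf;			/* SGIINTID, bits [3:0] */
	unsigned int targets = (reg >> 16) & 0xff;	/* CPUTargetList, bits [23:16] */
	unsigned int mode = (reg >> 24) & 3;		/* TargetListFilter, bits [25:24] */
	int c;

	printf("SGI %u, filter mode %u\n", sgi, mode);
	if (mode == 0)
		for (c = 0; c < 8; c++)
			if (targets & (1u << c))
				printf("  -> vcpu %d\n", c);
	return 0;
}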
|
||||
|
||||
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
unsigned long sources;
|
||||
int vcpu_id = vcpu->vcpu_id;
|
||||
int c;
|
||||
|
||||
sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
|
||||
|
||||
for_each_set_bit(c, &sources, dist->nr_cpus) {
|
||||
if (vgic_queue_irq(vcpu, c, irq))
|
||||
clear_bit(c, &sources);
|
||||
}
|
||||
|
||||
*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
|
||||
|
||||
/*
|
||||
* If the sources bitmap has been cleared it means that we
|
||||
* could queue all the SGIs onto link registers (see the
|
||||
* clear_bit above), and therefore we are done with them in
|
||||
* our emulated gic and can get rid of them.
|
||||
*/
|
||||
if (!sources) {
|
||||
vgic_dist_irq_clear_pending(vcpu, irq);
|
||||
vgic_cpu_irq_clear(vcpu, irq);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
|
||||
* @kvm: pointer to the kvm struct
|
||||
*
|
||||
* Map the virtual CPU interface into the VM before running any VCPUs. We
|
||||
* can't do this at creation time, because user space must first set the
|
||||
* virtual CPU interface address in the guest physical address space.
|
||||
*/
|
||||
static int vgic_v2_map_resources(struct kvm *kvm,
|
||||
const struct vgic_params *params)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!irqchip_in_kernel(kvm))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&kvm->lock);
|
||||
|
||||
if (vgic_ready(kvm))
|
||||
goto out;
|
||||
|
||||
if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
|
||||
IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
|
||||
kvm_err("Need to set vgic cpu and dist addresses first\n");
|
||||
ret = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the vgic if this hasn't already been done on demand by
|
||||
* accessing the vgic state from userspace.
|
||||
*/
|
||||
ret = vgic_init(kvm);
|
||||
if (ret) {
|
||||
kvm_err("Unable to allocate maps\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
|
||||
params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
|
||||
true);
|
||||
if (ret) {
|
||||
kvm_err("Unable to remap VGIC CPU to VCPU\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
kvm->arch.vgic.ready = true;
|
||||
out:
|
||||
if (ret)
|
||||
kvm_vgic_destroy(kvm);
|
||||
mutex_unlock(&kvm->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
|
||||
*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
|
||||
}
|
||||
|
||||
static int vgic_v2_init_model(struct kvm *kvm)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
|
||||
vgic_set_target_reg(kvm, 0, i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void vgic_v2_init_emulation(struct kvm *kvm)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
|
||||
dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
|
||||
dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
|
||||
dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
|
||||
dist->vm_ops.init_model = vgic_v2_init_model;
|
||||
dist->vm_ops.map_resources = vgic_v2_map_resources;
|
||||
|
||||
kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
|
||||
}
|
||||
|
||||
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio, phys_addr_t offset)
|
||||
{
|
||||
bool updated = false;
|
||||
struct vgic_vmcr vmcr;
|
||||
u32 *vmcr_field;
|
||||
u32 reg;
|
||||
|
||||
vgic_get_vmcr(vcpu, &vmcr);
|
||||
|
||||
switch (offset & ~0x3) {
|
||||
case GIC_CPU_CTRL:
|
||||
vmcr_field = &vmcr.ctlr;
|
||||
break;
|
||||
case GIC_CPU_PRIMASK:
|
||||
vmcr_field = &vmcr.pmr;
|
||||
break;
|
||||
case GIC_CPU_BINPOINT:
|
||||
vmcr_field = &vmcr.bpr;
|
||||
break;
|
||||
case GIC_CPU_ALIAS_BINPOINT:
|
||||
vmcr_field = &vmcr.abpr;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (!mmio->is_write) {
|
||||
reg = *vmcr_field;
|
||||
mmio_data_write(mmio, ~0, reg);
|
||||
} else {
|
||||
reg = mmio_data_read(mmio, ~0);
|
||||
if (reg != *vmcr_field) {
|
||||
*vmcr_field = reg;
|
||||
vgic_set_vmcr(vcpu, &vmcr);
|
||||
updated = true;
|
||||
}
|
||||
}
|
||||
return updated;
|
||||
}
|
||||
|
||||
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio, phys_addr_t offset)
|
||||
{
|
||||
return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
|
||||
}
|
||||
|
||||
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
if (mmio->is_write)
|
||||
return false;
|
||||
|
||||
/* GICC_IIDR */
|
||||
reg = (PRODUCT_ID_KVM << 20) |
|
||||
(GICC_ARCH_VERSION_V2 << 16) |
|
||||
(IMPLEMENTER_ARM << 0);
|
||||
mmio_data_write(mmio, ~0, reg);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* CPU Interface Register accesses - these are not accessed by the VM, but by
|
||||
* user space for saving and restoring VGIC state.
|
||||
*/
|
||||
static const struct kvm_mmio_range vgic_cpu_ranges[] = {
|
||||
{
|
||||
.base = GIC_CPU_CTRL,
|
||||
.len = 12,
|
||||
.handle_mmio = handle_cpu_mmio_misc,
|
||||
},
|
||||
{
|
||||
.base = GIC_CPU_ALIAS_BINPOINT,
|
||||
.len = 4,
|
||||
.handle_mmio = handle_mmio_abpr,
|
||||
},
|
||||
{
|
||||
.base = GIC_CPU_ACTIVEPRIO,
|
||||
.len = 16,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GIC_CPU_IDENT,
|
||||
.len = 4,
|
||||
.handle_mmio = handle_cpu_mmio_ident,
|
||||
},
|
||||
};
|
||||
|
||||
static int vgic_attr_regs_access(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr,
|
||||
u32 *reg, bool is_write)
|
||||
{
|
||||
const struct kvm_mmio_range *r = NULL, *ranges;
|
||||
phys_addr_t offset;
|
||||
int ret, cpuid, c;
|
||||
struct kvm_vcpu *vcpu, *tmp_vcpu;
|
||||
struct vgic_dist *vgic;
|
||||
struct kvm_exit_mmio mmio;
|
||||
|
||||
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
|
||||
cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
|
||||
KVM_DEV_ARM_VGIC_CPUID_SHIFT;
|
||||
|
||||
mutex_lock(&dev->kvm->lock);
|
||||
|
||||
ret = vgic_init(dev->kvm);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
vcpu = kvm_get_vcpu(dev->kvm, cpuid);
|
||||
vgic = &dev->kvm->arch.vgic;
|
||||
|
||||
mmio.len = 4;
|
||||
mmio.is_write = is_write;
|
||||
if (is_write)
|
||||
mmio_data_write(&mmio, ~0, *reg);
|
||||
switch (attr->group) {
|
||||
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
|
||||
mmio.phys_addr = vgic->vgic_dist_base + offset;
|
||||
ranges = vgic_dist_ranges;
|
||||
break;
|
||||
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
|
||||
mmio.phys_addr = vgic->vgic_cpu_base + offset;
|
||||
ranges = vgic_cpu_ranges;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
r = vgic_find_range(ranges, &mmio, offset);
|
||||
|
||||
if (unlikely(!r || !r->handle_mmio)) {
|
||||
ret = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
spin_lock(&vgic->lock);
|
||||
|
||||
/*
|
||||
* Ensure that no other VCPU is running by checking the vcpu->cpu
|
||||
* field. If no other VCPUs are running we can safely access the VGIC
|
||||
* state, because even if another VCPU is run after this point, that
|
||||
* VCPU will not touch the vgic state, because it will block on
|
||||
* getting the vgic->lock in kvm_vgic_sync_hwstate().
|
||||
*/
|
||||
kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
|
||||
if (unlikely(tmp_vcpu->cpu != -1)) {
|
||||
ret = -EBUSY;
|
||||
goto out_vgic_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Move all pending IRQs from the LRs on all VCPUs so the pending
|
||||
* state can be properly represented in the register state accessible
|
||||
* through this API.
|
||||
*/
|
||||
kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
|
||||
vgic_unqueue_irqs(tmp_vcpu);
|
||||
|
||||
offset -= r->base;
|
||||
r->handle_mmio(vcpu, &mmio, offset);
|
||||
|
||||
if (!is_write)
|
||||
*reg = mmio_data_read(&mmio, ~0);
|
||||
|
||||
ret = 0;
|
||||
out_vgic_unlock:
|
||||
spin_unlock(&vgic->lock);
|
||||
out:
|
||||
mutex_unlock(&dev->kvm->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vgic_v2_create(struct kvm_device *dev, u32 type)
|
||||
{
|
||||
return kvm_vgic_create(dev->kvm, type);
|
||||
}
|
||||
|
||||
static void vgic_v2_destroy(struct kvm_device *dev)
|
||||
{
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
static int vgic_v2_set_attr(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = vgic_set_common_attr(dev, attr);
|
||||
if (ret != -ENXIO)
|
||||
return ret;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
|
||||
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
|
||||
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
|
||||
u32 reg;
|
||||
|
||||
if (get_user(reg, uaddr))
|
||||
return -EFAULT;
|
||||
|
||||
return vgic_attr_regs_access(dev, attr, &reg, true);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static int vgic_v2_get_attr(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = vgic_get_common_attr(dev, attr);
|
||||
if (ret != -ENXIO)
|
||||
return ret;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
|
||||
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
|
||||
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
|
||||
u32 reg = 0;
|
||||
|
||||
ret = vgic_attr_regs_access(dev, attr, &reg, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
return put_user(reg, uaddr);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static int vgic_v2_has_attr(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
phys_addr_t offset;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_DEV_ARM_VGIC_GRP_ADDR:
|
||||
switch (attr->attr) {
|
||||
case KVM_VGIC_V2_ADDR_TYPE_DIST:
|
||||
case KVM_VGIC_V2_ADDR_TYPE_CPU:
|
||||
return 0;
|
||||
}
|
||||
break;
|
||||
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
|
||||
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
|
||||
return vgic_has_attr_regs(vgic_dist_ranges, offset);
|
||||
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
|
||||
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
|
||||
return vgic_has_attr_regs(vgic_cpu_ranges, offset);
|
||||
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
|
||||
return 0;
|
||||
case KVM_DEV_ARM_VGIC_GRP_CTRL:
|
||||
switch (attr->attr) {
|
||||
case KVM_DEV_ARM_VGIC_CTRL_INIT:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
|
||||
.name = "kvm-arm-vgic-v2",
|
||||
.create = vgic_v2_create,
|
||||
.destroy = vgic_v2_destroy,
|
||||
.set_attr = vgic_v2_set_attr,
|
||||
.get_attr = vgic_v2_get_attr,
|
||||
.has_attr = vgic_v2_has_attr,
|
||||
};
|
|
@@ -229,12 +229,16 @@ int vgic_v2_probe(struct device_node *vgic_node,
|
|||
goto out_unmap;
|
||||
}
|
||||
|
||||
vgic->can_emulate_gicv2 = true;
|
||||
kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
|
||||
|
||||
vgic->vcpu_base = vcpu_res.start;
|
||||
|
||||
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
|
||||
vctrl_res.start, vgic->maint_irq);
|
||||
|
||||
vgic->type = VGIC_V2;
|
||||
vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
|
||||
*ops = &vgic_v2_ops;
|
||||
*params = vgic;
|
||||
goto out;
|
||||
|
|
virt/kvm/arm/vgic-v3-emul.c (new file, 1036 lines; diff suppressed because it is too large)
|
@@ -34,6 +34,7 @@
|
|||
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
|
||||
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
|
||||
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
|
||||
#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
|
||||
|
||||
/*
|
||||
* LRs are stored in reverse order in memory. make sure we index them
|
||||
|
@@ -48,12 +49,17 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
|
|||
struct vgic_lr lr_desc;
|
||||
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
|
||||
|
||||
lr_desc.irq = val & GICH_LR_VIRTUALID;
|
||||
if (lr_desc.irq <= 15)
|
||||
lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
|
||||
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
|
||||
lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
|
||||
else
|
||||
lr_desc.source = 0;
|
||||
lr_desc.state = 0;
|
||||
lr_desc.irq = val & GICH_LR_VIRTUALID;
|
||||
|
||||
lr_desc.source = 0;
|
||||
if (lr_desc.irq <= 15 &&
|
||||
vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
|
||||
lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
|
||||
|
||||
lr_desc.state = 0;
|
||||
|
||||
if (val & ICH_LR_PENDING_BIT)
|
||||
lr_desc.state |= LR_STATE_PENDING;
|
||||
|
@@ -68,8 +74,20 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
|
|||
static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
|
||||
struct vgic_lr lr_desc)
|
||||
{
|
||||
u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
|
||||
lr_desc.irq);
|
||||
u64 lr_val;
|
||||
|
||||
lr_val = lr_desc.irq;
|
||||
|
||||
/*
|
||||
* Currently all guest IRQs are Group1, as Group0 would result
|
||||
* in a FIQ in the guest, which it wouldn't expect.
|
||||
* Eventually we want to make this configurable, so we may revisit
|
||||
* this in the future.
|
||||
*/
|
||||
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
|
||||
lr_val |= ICH_LR_GROUP;
|
||||
else
|
||||
lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
|
||||
|
||||
if (lr_desc.state & LR_STATE_PENDING)
|
||||
lr_val |= ICH_LR_PENDING_BIT;
|
||||
|
@@ -145,15 +163,27 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
|
|||
|
||||
static void vgic_v3_enable(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
|
||||
|
||||
/*
|
||||
* By forcing VMCR to zero, the GIC will restore the binary
|
||||
* points to their reset values. Anything else resets to zero
|
||||
* anyway.
|
||||
*/
|
||||
vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
|
||||
vgic_v3->vgic_vmcr = 0;
|
||||
|
||||
/*
|
||||
* If we are emulating a GICv3, we do it in a non-GICv2-compatible
|
||||
* way, so we force SRE to 1 to demonstrate this to the guest.
|
||||
* This goes with the spec allowing the value to be RAO/WI.
|
||||
*/
|
||||
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
|
||||
vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
|
||||
else
|
||||
vgic_v3->vgic_sre = 0;
|
||||
|
||||
/* Get the show on the road... */
|
||||
vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
|
||||
vgic_v3->vgic_hcr = ICH_HCR_EN;
|
||||
}
|
||||
|
||||
static const struct vgic_ops vgic_v3_ops = {
|
||||
|
@@ -205,35 +235,37 @@ int vgic_v3_probe(struct device_node *vgic_node,
|
|||
* maximum of 16 list registers. Just ignore bit 4...
|
||||
*/
|
||||
vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
|
||||
vgic->can_emulate_gicv2 = false;
|
||||
|
||||
if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
|
||||
gicv_idx = 1;
|
||||
|
||||
gicv_idx += 3; /* Also skip GICD, GICC, GICH */
|
||||
if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
|
||||
kvm_err("Cannot obtain GICV region\n");
|
||||
ret = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!PAGE_ALIGNED(vcpu_res.start)) {
|
||||
kvm_err("GICV physical address 0x%llx not page aligned\n",
|
||||
kvm_info("GICv3: no GICV resource entry\n");
|
||||
vgic->vcpu_base = 0;
|
||||
} else if (!PAGE_ALIGNED(vcpu_res.start)) {
|
||||
pr_warn("GICV physical address 0x%llx not page aligned\n",
|
||||
(unsigned long long)vcpu_res.start);
|
||||
ret = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
|
||||
kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
|
||||
vgic->vcpu_base = 0;
|
||||
} else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
|
||||
pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
|
||||
(unsigned long long)resource_size(&vcpu_res),
|
||||
PAGE_SIZE);
|
||||
ret = -ENXIO;
|
||||
goto out;
|
||||
vgic->vcpu_base = 0;
|
||||
} else {
|
||||
vgic->vcpu_base = vcpu_res.start;
|
||||
vgic->can_emulate_gicv2 = true;
|
||||
kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
|
||||
KVM_DEV_TYPE_ARM_VGIC_V2);
|
||||
}
|
||||
if (vgic->vcpu_base == 0)
|
||||
kvm_info("disabling GICv2 emulation\n");
|
||||
kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
|
||||
|
||||
vgic->vcpu_base = vcpu_res.start;
|
||||
vgic->vctrl_base = NULL;
|
||||
vgic->type = VGIC_V3;
|
||||
vgic->max_gic_vcpus = KVM_MAX_VCPUS;
|
||||
|
||||
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
|
||||
vcpu_res.start, vgic->maint_irq);
|
||||
|
|
virt/kvm/arm/vgic.c (1139 lines changed; diff suppressed because it is too large)
virt/kvm/arm/vgic.h (new file, 123 lines)
|
@@ -0,0 +1,123 @@
|
|||
/*
|
||||
* Copyright (C) 2012-2014 ARM Ltd.
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* Derived from virt/kvm/arm/vgic.c
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __KVM_VGIC_H__
|
||||
#define __KVM_VGIC_H__
|
||||
|
||||
#define VGIC_ADDR_UNDEF (-1)
|
||||
#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
|
||||
|
||||
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
|
||||
#define IMPLEMENTER_ARM 0x43b
|
||||
|
||||
#define ACCESS_READ_VALUE (1 << 0)
|
||||
#define ACCESS_READ_RAZ (0 << 0)
|
||||
#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
|
||||
#define ACCESS_WRITE_IGNORED (0 << 1)
|
||||
#define ACCESS_WRITE_SETBIT (1 << 1)
|
||||
#define ACCESS_WRITE_CLEARBIT (2 << 1)
|
||||
#define ACCESS_WRITE_VALUE (3 << 1)
|
||||
#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
|
||||
|
||||
#define VCPU_NOT_ALLOCATED ((u8)-1)
|
||||
|
||||
unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
|
||||
|
||||
void vgic_update_state(struct kvm *kvm);
|
||||
int vgic_init_common_maps(struct kvm *kvm);
|
||||
|
||||
u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
|
||||
u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
|
||||
|
||||
void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq);
|
||||
void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq);
|
||||
void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
|
||||
void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
|
||||
int irq, int val);
|
||||
|
||||
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
|
||||
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
|
||||
|
||||
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
|
||||
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
|
||||
|
||||
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
|
||||
phys_addr_t offset, int mode);
|
||||
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset);
|
||||
|
||||
static inline
|
||||
u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
|
||||
{
|
||||
return le32_to_cpu(*((u32 *)mmio->data)) & mask;
|
||||
}
|
||||
|
||||
static inline
|
||||
void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
|
||||
{
|
||||
*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
|
||||
}
|
||||
|
||||
struct kvm_mmio_range {
|
||||
phys_addr_t base;
|
||||
unsigned long len;
|
||||
int bits_per_irq;
|
||||
bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset);
|
||||
};
|
||||
|
||||
static inline bool is_in_range(phys_addr_t addr, unsigned long len,
|
||||
phys_addr_t baseaddr, unsigned long size)
|
||||
{
|
||||
return (addr >= baseaddr) && (addr + len <= baseaddr + size);
|
||||
}
|
||||
|
||||
const
|
||||
struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset);
|
||||
|
||||
bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
const struct kvm_mmio_range *ranges,
|
||||
unsigned long mmio_base);
|
||||
|
||||
bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset, int vcpu_id, int access);
|
||||
|
||||
bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset, int vcpu_id);
|
||||
|
||||
bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset, int vcpu_id);
|
||||
|
||||
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset);
|
||||
|
||||
void vgic_kick_vcpus(struct kvm *kvm);
|
||||
|
||||
int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset);
|
||||
int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
|
||||
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
|
||||
|
||||
int vgic_init(struct kvm *kvm);
|
||||
void vgic_v2_init_emulation(struct kvm *kvm);
|
||||
void vgic_v3_init_emulation(struct kvm *kvm);
|
||||
|
||||
#endif
|
|
@@ -176,6 +176,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
|
|||
return called;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
|
||||
void kvm_flush_remote_tlbs(struct kvm *kvm)
|
||||
{
|
||||
long dirty_count = kvm->tlbs_dirty;
|
||||
|
@@ -186,6 +187,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
|
|||
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
|
||||
#endif
|
||||
|
||||
void kvm_reload_remote_mmus(struct kvm *kvm)
|
||||
{
|
||||
|
@@ -993,6 +995,86 @@ int kvm_get_dirty_log(struct kvm *kvm,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
|
||||
|
||||
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
/**
|
||||
* kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
|
||||
* are dirty write protect them for next write.
|
||||
* @kvm: pointer to kvm instance
|
||||
* @log: slot id and address to which we copy the log
|
||||
* @is_dirty: flag set if any page is dirty
|
||||
*
|
||||
* We need to keep in mind that VCPU threads can write to the bitmap
|
||||
* concurrently. So, to avoid losing track of dirty pages, we keep the
|
||||
* following order:
|
||||
*
|
||||
* 1. Take a snapshot of the bit and clear it if needed.
|
||||
* 2. Write protect the corresponding page.
|
||||
* 3. Copy the snapshot to the userspace.
|
||||
* 4. Upon return caller flushes TLB's if needed.
|
||||
*
|
||||
* Between 2 and 4, the guest may write to the page using the remaining TLB
|
||||
* entry. This is not a problem because the page is reported dirty using
|
||||
* the snapshot taken before and step 4 ensures that writes done after
|
||||
* exiting to userspace will be logged for the next call.
|
||||
*
|
||||
*/
|
||||
int kvm_get_dirty_log_protect(struct kvm *kvm,
|
||||
struct kvm_dirty_log *log, bool *is_dirty)
|
||||
{
|
||||
struct kvm_memory_slot *memslot;
|
||||
int r, i;
|
||||
unsigned long n;
|
||||
unsigned long *dirty_bitmap;
|
||||
unsigned long *dirty_bitmap_buffer;
|
||||
|
||||
r = -EINVAL;
|
||||
if (log->slot >= KVM_USER_MEM_SLOTS)
|
||||
goto out;
|
||||
|
||||
memslot = id_to_memslot(kvm->memslots, log->slot);
|
||||
|
||||
dirty_bitmap = memslot->dirty_bitmap;
|
||||
r = -ENOENT;
|
||||
if (!dirty_bitmap)
|
||||
goto out;
|
||||
|
||||
n = kvm_dirty_bitmap_bytes(memslot);
|
||||
|
||||
dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
|
||||
memset(dirty_bitmap_buffer, 0, n);
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
*is_dirty = false;
|
||||
for (i = 0; i < n / sizeof(long); i++) {
|
||||
unsigned long mask;
|
||||
gfn_t offset;
|
||||
|
||||
if (!dirty_bitmap[i])
|
||||
continue;
|
||||
|
||||
*is_dirty = true;
|
||||
|
||||
mask = xchg(&dirty_bitmap[i], 0);
|
||||
dirty_bitmap_buffer[i] = mask;
|
||||
|
||||
offset = i * BITS_PER_LONG;
|
||||
kvm_arch_mmu_write_protect_pt_masked(kvm, memslot, offset,
|
||||
mask);
|
||||
}
|
||||
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
|
||||
goto out;
|
||||
|
||||
r = 0;
|
||||
out:
|
||||
return r;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
|
||||
#endif
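On the consumer side, the bitmap produced by kvm_get_dirty_log_protect() reaches user space through the existing KVM_GET_DIRTY_LOG vm ioctl. A minimal hypothetical userspace sketch follows; the vm_fd, slot number and bitmap size are assumptions for illustration, and error handling is reduced to returning NULL.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

/* Fetch the dirty bitmap for one memslot; the caller frees the result. */
static unsigned long *fetch_dirty_log(int vm_fd, unsigned int slot,
				      size_t bitmap_bytes)
{
	struct kvm_dirty_log log = { .slot = slot };
	unsigned long *bitmap = calloc(1, bitmap_bytes);

	if (!bitmap)
		return NULL;

	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;	/* one bit per page of the slot, set if the page was dirtied */
}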
|
||||
|
||||
bool kvm_largepages_enabled(void)
|
||||
{
|
||||
return largepages_enabled;