forked from luck/tmp_suning_uos_patched
KVM: MMU: Modify kvm_shadow_walk.entry to accept u64 addr
EPT is 4-level by default in 32-bit PAE mode (48 bits), but the addr parameter of kvm_shadow_walk->entry() only accepts an unsigned long as the virtual address, which is 32 bits wide in 32-bit PAE. This results in a SHADOW_PT_INDEX() overflow when trying to fetch the level-4 index. Fix it by extending kvm_shadow_walk->entry() to accept a 64-bit addr parameter. Signed-off-by: Sheng Yang <sheng.yang@intel.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
This commit is contained in:
parent
8c4b537da7
commit
d40a1ee485
@ -144,7 +144,7 @@ struct kvm_rmap_desc {
|
||||
|
||||
struct kvm_shadow_walk {
|
||||
int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
|
||||
gva_t addr, u64 *spte, int level);
|
||||
u64 addr, u64 *spte, int level);
|
||||
};
|
||||
|
||||
static struct kmem_cache *pte_chain_cache;
|
||||
@ -941,7 +941,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
|
||||
static int walk_shadow(struct kvm_shadow_walk *walker,
|
||||
struct kvm_vcpu *vcpu, gva_t addr)
|
||||
struct kvm_vcpu *vcpu, u64 addr)
|
||||
{
|
||||
hpa_t shadow_addr;
|
||||
int level;
|
||||
@ -1270,7 +1270,7 @@ struct direct_shadow_walk {
|
||||
|
||||
static int direct_map_entry(struct kvm_shadow_walk *_walk,
|
||||
struct kvm_vcpu *vcpu,
|
||||
gva_t addr, u64 *sptep, int level)
|
||||
u64 addr, u64 *sptep, int level)
|
||||
{
|
||||
struct direct_shadow_walk *walk =
|
||||
container_of(_walk, struct direct_shadow_walk, walker);
|
||||
@ -1289,7 +1289,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
|
||||
|
||||
if (*sptep == shadow_trap_nonpresent_pte) {
|
||||
pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
|
||||
sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
|
||||
sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
|
||||
1, ACC_ALL, sptep);
|
||||
if (!sp) {
|
||||
pgprintk("nonpaging_map: ENOMEM\n");
|
||||
@ -1317,7 +1317,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
|
||||
.pt_write = 0,
|
||||
};
|
||||
|
||||
r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
|
||||
r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
|
||||
if (r < 0)
|
||||
return r;
|
||||
return walker.pt_write;
|
||||
|
@ -286,7 +286,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
|
||||
* Fetch a shadow pte for a specific level in the paging hierarchy.
|
||||
*/
|
||||
static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
|
||||
struct kvm_vcpu *vcpu, gva_t addr,
|
||||
struct kvm_vcpu *vcpu, u64 addr,
|
||||
u64 *sptep, int level)
|
||||
{
|
||||
struct shadow_walker *sw =
|
||||
@ -326,7 +326,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
|
||||
metaphysical = 0;
|
||||
table_gfn = gw->table_gfn[level - 2];
|
||||
}
|
||||
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
|
||||
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
|
||||
metaphysical, access, sptep);
|
||||
if (!metaphysical) {
|
||||
r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
|
||||
|
Loading…
Reference in New Issue
Block a user