[PATCH] KVM: MMU: Page table write flood protection
In fork() (or when we protect a page that is no longer a page table), we can
experience floods of writes to a page, which have to be emulated.  This is
expensive.  So, if we detect such a flood, zap the page so subsequent writes
can proceed natively.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 86a5ba025d
parent 139bdb2d9e
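In essence, the patch gives each vcpu a record of the last page-table gfn it
emulated a write to, plus a counter of consecutive writes to that gfn; once
the counter reaches three, the page is treated as flooded and zapped so later
writes proceed natively. A minimal standalone sketch of the heuristic, using
illustrative stand-in names rather than the kernel's own types:

/* Sketch only: pt_write_tracker and pt_write_flooded() are
 * illustrative names, not part of the kernel source. */
#include <stdbool.h>

typedef unsigned long gfn_t;

struct pt_write_tracker {
        gfn_t last_pt_write_gfn;   /* last guest page-table frame written */
        int last_pt_write_count;   /* consecutive writes to that frame */
};

/* Returns true once a third consecutive emulated write hits the
 * same gfn -- the patch's threshold for declaring a write flood. */
static bool pt_write_flooded(struct pt_write_tracker *t, gfn_t gfn)
{
        if (gfn == t->last_pt_write_gfn) {
                ++t->last_pt_write_count;
                return t->last_pt_write_count >= 3;
        }
        t->last_pt_write_gfn = gfn;
        t->last_pt_write_count = 1;
        return false;
}

The diff below wires this same logic into kvm_mmu_pre_write(), backed by two
new fields in struct kvm_vcpu.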
@@ -238,6 +238,9 @@ struct kvm_vcpu {
 	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
+	gfn_t last_pt_write_gfn;
+	int last_pt_write_count;
+
 	struct kvm_guest_debug guest_debug;
 
 	char fx_buf[FX_BUF_SIZE];
@@ -969,8 +969,17 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 	unsigned page_offset;
 	unsigned misaligned;
 	int level;
+	int flooded = 0;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	if (gfn == vcpu->last_pt_write_gfn) {
+		++vcpu->last_pt_write_count;
+		if (vcpu->last_pt_write_count >= 3)
+			flooded = 1;
+	} else {
+		vcpu->last_pt_write_gfn = gfn;
+		vcpu->last_pt_write_count = 1;
+	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
@@ -978,11 +987,16 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 			continue;
 		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
-		if (misaligned) {
+		if (misaligned || flooded) {
 			/*
 			 * Misaligned accesses are too much trouble to fix
 			 * up; also, they usually indicate a page is not used
 			 * as a page table.
+			 *
+			 * If we're seeing too many writes to a page,
+			 * it may no longer be a page table, or we may be
+			 * forking, in which case it is better to unmap the
+			 * page.
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, page->role.word);
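The misaligned test in this hunk is compact enough to warrant a worked
example: XOR-ing the offsets of the first and last byte of the write, then
masking off the bits that address within a single PTE, is nonzero exactly
when the write fails to fit inside one naturally aligned PTE. A
self-contained check of that behavior (the helper name is an illustrative
stand-in):

#include <assert.h>
#include <stdio.h>

/* Sketch only: nonzero iff a write of `bytes` bytes at `offset` does
 * not stay within one naturally aligned PTE of `pte_size` bytes. */
static unsigned misaligned(unsigned offset, unsigned bytes, unsigned pte_size)
{
        return (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
}

int main(void)
{
        assert(!misaligned(4, 4, 4)); /* exactly one 32-bit pte: aligned */
        assert(misaligned(2, 4, 4));  /* straddles two 32-bit ptes */
        assert(misaligned(0, 8, 4));  /* covers two 32-bit ptes */
        assert(!misaligned(8, 8, 8)); /* exactly one 64-bit pte: aligned */
        printf("misalignment checks passed\n");
        return 0;
}

Either trigger, misaligned or flooded, now takes the same zap path, since
both suggest the page is not (or is no longer) really being used as a page
table.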