KVM: MMU: use __xchg_spte more smartly
Sometimes setting an spte atomically is not needed; this patch calls __xchg_spte() more smartly.

Note: if the old mapping's accessed bit is already set, no atomic operation is needed, since the accessed bit cannot be lost.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 9a3aad7057
parent e4b502ead2
@@ -682,9 +682,14 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
-	u64 old_spte;
+	u64 old_spte = *sptep;
+
+	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
+	      old_spte & shadow_accessed_mask) {
+		__set_spte(sptep, new_spte);
+	} else
+		old_spte = __xchg_spte(sptep, new_spte);
 
-	old_spte = __xchg_spte(sptep, new_spte);
 	if (!is_rmap_spte(old_spte))
 		return;
 	pfn = spte_to_pfn(old_spte);
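To make the reasoning behind the fast path explicit, here is a minimal, self-contained sketch (not the kernel code) of the same decision. The names used below (set_spte_track_bits_sketch, PT_PRESENT_MASK, a plain store standing in for __set_spte, and the GCC __atomic_exchange_n builtin standing in for __xchg_spte) are simplified stand-ins chosen for illustration; the real helpers differ and also handle the rmap/pfn bookkeeping and non-64-bit configurations.

#include <stdint.h>
#include <stdbool.h>

static uint64_t shadow_accessed_mask;   /* 0 when accessed-bit tracking is off */
#define PT_PRESENT_MASK 1ULL            /* illustrative present bit */

static bool is_shadow_present_pte(uint64_t pte)
{
	return pte & PT_PRESENT_MASK;
}

static uint64_t set_spte_track_bits_sketch(uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old_spte = *sptep;

	/*
	 * A plain (non-atomic) store is safe when no concurrently-set bit
	 * can be lost:
	 *   - no accessed bit is tracked at all, or
	 *   - the old spte is not present, so hardware never touches it, or
	 *   - the accessed bit is already set, so there is nothing to lose.
	 */
	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
	    (old_spte & shadow_accessed_mask)) {
		*sptep = new_spte;                      /* non-atomic path */
	} else {
		/*
		 * Atomic exchange so that a racing hardware update of the
		 * accessed bit is captured in the returned old value.
		 */
		old_spte = __atomic_exchange_n(sptep, new_spte,
					       __ATOMIC_SEQ_CST);
	}

	return old_spte;   /* caller inspects accessed/dirty bits in old_spte */
}

The key point is that the non-atomic branch is taken only when a concurrent hardware update of the accessed bit is impossible or already reflected in old_spte, so the value the caller sees remains accurate for accessed/dirty tracking.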