KVM: x86/vPMU: Rename pmu_ops callbacks from msr_idx to rdpmc_ecx
The legacy pmu_ops->msr_idx_to_pmc is only called in kvm_pmu_rdpmc, so this function actually receives the contents of ECX before RDPMC and translates it to a kvm_pmc. Let's clarify its semantics by renaming the existing msr_idx_to_pmc to rdpmc_ecx_to_pmc, and is_valid_msr_idx to is_valid_rdpmc_ecx; likewise for the wrapper kvm_pmu_is_valid_msr_idx.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52ba4b0b99
commit 98ff80f5b7
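For context, the idx argument these callbacks receive is simply whatever the guest loaded into ECX before executing RDPMC. On Intel, bit 30 of that value selects the fixed-counter space and the low bits pick a counter within it, which is the check visible in the intel_is_valid_rdpmc_ecx hunk below. Here is a minimal, self-contained sketch of that decoding; the toy_pmu struct and the counter limits are illustrative stand-ins, not KVM's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the vCPU's PMU state; not KVM's struct kvm_pmu. */
struct toy_pmu {
	unsigned int nr_arch_gp_counters;
	unsigned int nr_arch_fixed_counters;
};

/*
 * Decode an RDPMC ECX value: bit 30 selects the fixed-counter space, the
 * low bits index a counter within that space (mirrors the range check in
 * intel_is_valid_rdpmc_ecx below). Returns true if the index is valid.
 */
static bool toy_is_valid_rdpmc_ecx(const struct toy_pmu *pmu, unsigned int idx)
{
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);	/* strip the type bits, keep the counter index */

	return fixed ? idx < pmu->nr_arch_fixed_counters
		     : idx < pmu->nr_arch_gp_counters;
}

int main(void)
{
	struct toy_pmu pmu = {
		.nr_arch_gp_counters = 4,
		.nr_arch_fixed_counters = 3,
	};

	printf("GP counter 2:    %d\n", toy_is_valid_rdpmc_ecx(&pmu, 2));
	printf("fixed counter 1: %d\n", toy_is_valid_rdpmc_ecx(&pmu, (1u << 30) | 1));
	printf("fixed counter 5: %d\n", toy_is_valid_rdpmc_ecx(&pmu, (1u << 30) | 5));
	return 0;
}

With the example limits above, the program reports GP counter 2 and fixed counter 1 as valid and fixed counter 5 as out of range.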
arch/x86/kvm/pmu.c
@@ -266,9 +266,9 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 }
 
 /* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
-	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
+	return kvm_x86_ops->pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -318,7 +318,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
+	pmc = kvm_x86_ops->pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
 
arch/x86/kvm/pmu.h
@@ -25,9 +25,9 @@ struct kvm_pmu_ops {
 	unsigned (*find_fixed_event)(int idx);
 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
-	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
-		u64 *mask);
-	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
+	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
+		unsigned int idx, u64 *mask);
+	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
 	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -110,7 +110,7 @@ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
arch/x86/kvm/pmu_amd.c
@@ -174,7 +174,7 @@ static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 }
 
 /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
-static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
@@ -184,7 +184,8 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 }
 
 /* idx is the ECX register of RDPMC instruction */
-static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
+static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
+	unsigned int idx, u64 *mask)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *counters;
@@ -306,8 +307,8 @@ struct kvm_pmu_ops amd_pmu_ops = {
 	.find_fixed_event = amd_find_fixed_event,
 	.pmc_is_enabled = amd_pmc_is_enabled,
 	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
-	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
-	.is_valid_msr_idx = amd_is_valid_msr_idx,
+	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
+	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
 	.is_valid_msr = amd_is_valid_msr,
 	.get_msr = amd_pmu_get_msr,
 	.set_msr = amd_pmu_set_msr,
arch/x86/kvm/vmx/pmu_intel.c
@@ -111,7 +111,7 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 }
 
 /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
-static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fixed = idx & (1u << 30);
@@ -122,8 +122,8 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 		(fixed && idx >= pmu->nr_arch_fixed_counters);
 }
 
-static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
-					    unsigned idx, u64 *mask)
+static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
+					      unsigned int idx, u64 *mask)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fixed = idx & (1u << 30);
@@ -366,8 +366,8 @@ struct kvm_pmu_ops intel_pmu_ops = {
 	.find_fixed_event = intel_find_fixed_event,
 	.pmc_is_enabled = intel_pmc_is_enabled,
 	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
-	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
-	.is_valid_msr_idx = intel_is_valid_msr_idx,
+	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
+	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
 	.is_valid_msr = intel_is_valid_msr,
 	.get_msr = intel_pmu_get_msr,
 	.set_msr = intel_pmu_set_msr,
arch/x86/kvm/x86.c
@@ -6144,7 +6144,7 @@ static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
 			      u32 pmc)
 {
-	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
+	return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,