KVM/arm fixes for 5.8, take #3
- Disable preemption on context-switching PMU EL0 state happening on system register trap
- Don't clobber X0 when tearing down KVM via a soft reset (kexec)
-----BEGIN PGP SIGNATURE-----

iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl8DAcQPHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpD7qgP/3xVVCyO32eqfe+Vh+0cHun3yfMKYPHdhrT4
rXPoJbUFNXu/pvMuPd1plh5DJM/sdZtrC96tgswDmF00Yk9ObdLDafgSFCrscfMU
GrgSEGplTve1RArzATSC64nfFzJqkJlLNywJeD1bFmh4qC5rbm7w8jStf4H6MEp3
sBXedqQO+WswDLQAPnMCXw5t4uNU4urA757fKWJAgJv2Xwx0NKG+3PFbvx9I4dcK
M0q+266ufDbq3xv6bhvFqo4kOnZQKH2fwIENRZVHi+5iU7Em2EX6NpzmbxxGCAfD
d3jGCcrJ8f959j/d23k0lZXU0V30h3gvifGHUwEz/hqnbi1+EOt9pGqnqMWgmwPf
QdoVtL9CdUeyAlfROLMBp0gxXwlUgN5dodCE9BlTzctUCQFRTsZPgAktEDlKcUWh
rFz4LyoZ82OiC+db+wOHRTL+q7b0gs/KUgLbkeu26WGvDuLU4kEtol1ScmrfSm7O
VzKxHzaxLsm6LP/OcS+9r9qHU2RRHhJnuwSNmsueGnKSFV2AECrsgoKGO5Tl2kKU
rWnpJY/lqlChGsu4nfPeRQIcQtVL3H6BM7hz7d+EM4/Yx70EzmaTOB8K2C15o6BI
tDuLWzzAaaY4YqFEVgvaieRuI+2qZWCh8jSu5sPuC6StfpqDD/G4221Q6vn3ioor
g8hUv78j
=/VkN
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-5.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm fixes for 5.8, take #3

- Disable preemption on context-switching PMU EL0 state happening on system register trap
- Don't clobber X0 when tearing down KVM via a soft reset (kexec)
commit 8038a922cf
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 
 1:	cmp	x0, #HVC_RESET_VECTORS
 	b.ne	1f
-reset:
+
 	/*
-	 * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-	 * case we coming via HVC_SOFT_RESTART.
+	 * Set the HVC_RESET_VECTORS return code before entering the common
+	 * path so that we do not clobber x0-x2 in case we are coming via
+	 * HVC_SOFT_RESTART.
 	 */
+	mov	x0, xzr
+reset:
+	/* Reset kvm back to the hyp stub. */
 	mrs	x5, sctlr_el2
 	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
 	/* Install stub vectors */
 	adr_l	x5, __hyp_stub_vectors
 	msr	vbar_el2, x5
-	mov	x0, xzr
 	eret
 
 1:	/* Bad stub call */
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	if (!has_vhe())
 		return;
 
+	preempt_disable();
 	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_guest);
 	kvm_vcpu_pmu_disable_el0(events_host);
+	preempt_enable();
 }
 
 /*
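The pmu.c hunk is the standard pattern for touching per-CPU data from a preemptible path: without the preempt_disable()/preempt_enable() pair, the task could migrate after this_cpu_ptr(&kvm_host_data) and then program EL0 counters using another CPU's event masks. Below is a reduced, self-contained sketch of that pattern; the struct layout and names paraphrase the kernel's kvm_host_data and are illustrative only.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Illustrative stand-in for the kernel's per-CPU kvm_host_data. */
struct host_data {
	struct {
		unsigned long events_guest;
		unsigned long events_host;
	} pmu_events;
};

static DEFINE_PER_CPU(struct host_data, sketch_host_data);

static void pmu_restore_guest_sketch(void)
{
	struct host_data *host;
	unsigned long events_guest, events_host;

	preempt_disable();			/* pin to this CPU... */
	host = this_cpu_ptr(&sketch_host_data);	/* ...so the pointer stays ours */
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;
	/* ...enable events_guest / disable events_host at EL0 here,
	 * guaranteed to hit the same CPU we read the masks from... */
	preempt_enable();
}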