forked from luck/tmp_suning_uos_patched
Merge remote-tracking branch 'remotes/powerpc/topic/ppc-kvm' into kvm-ppc-next

This merges in the "ppc-kvm" topic branch of the powerpc tree to get a
series of commits that touch both general arch/powerpc code and KVM
code. These commits will be merged both via the KVM tree and the
powerpc tree.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
commit 0a0c50f771

@@ -143,6 +143,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);

@@ -646,7 +647,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
 long kvmppc_h_random(struct kvm_vcpu *vcpu);
 void kvmhv_commence_exit(int trap);
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
 void kvmppc_subcore_enter_guest(void);
 void kvmppc_subcore_exit_guest(void);
 long kvmppc_realmode_hmi_handler(void);

@@ -209,7 +209,7 @@ extern int get_mce_event(struct machine_check_event *mce, bool release);
 extern void release_mce_event(void);
 extern void machine_check_queue_event(void);
 extern void machine_check_print_event_info(struct machine_check_event *evt,
-                                           bool user_mode);
+                                           bool user_mode, bool in_guest);
 #ifdef CONFIG_PPC_BOOK3S_64
 void flush_and_reload_slb(void);
 #endif /* CONFIG_PPC_BOOK3S_64 */

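The new in_guest flag threads through every caller of machine_check_print_event_info(). For orientation, here is a minimal userspace sketch of the resulting dispatch, annotated with the flag values each updated call site in the hunks below passes; the struct stub and printf are stand-ins for the kernel types and printk:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct machine_check_event: only the one field
     * this sketch needs. */
    struct mce_evt_stub { unsigned long long srr0; };

    /* Sketch of the new three-way report. Flag values at the call
     * sites updated in this merge:
     *   machine_check_process_queued_event():  (false, false)
     *   opal_machine_check():                  (user_mode(regs), false)
     *   kvmppc_handle_exit_hv() and
     *   kvmppc_handle_nested_exit():           (false, true)
     */
    static void print_nip(const struct mce_evt_stub *evt,
                          bool user_mode, bool in_guest)
    {
            if (in_guest)           /* SRR0 is a guest address */
                    printf("Guest NIP: %016llx\n", evt->srr0);
            else if (user_mode)     /* host user space; the kernel also
                                       prints PID and comm here */
                    printf("NIP: [%016llx] PID/Comm: (from current)\n",
                           evt->srr0);
            else                    /* host kernel context */
                    printf("NIP: [%016llx]\n", evt->srr0);
    }
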
@@ -301,13 +301,13 @@ static void machine_check_process_queued_event(struct irq_work *work)
        while (__this_cpu_read(mce_queue_count) > 0) {
                index = __this_cpu_read(mce_queue_count) - 1;
                evt = this_cpu_ptr(&mce_event_queue[index]);
-               machine_check_print_event_info(evt, false);
+               machine_check_print_event_info(evt, false, false);
                __this_cpu_dec(mce_queue_count);
        }
 }
 
 void machine_check_print_event_info(struct machine_check_event *evt,
-                              bool user_mode)
+                              bool user_mode, bool in_guest)
 {
        const char *level, *sevstr, *subtype;
        static const char *mc_ue_types[] = {

@@ -387,7 +387,9 @@ void machine_check_print_event_info(struct machine_check_event *evt,
               evt->disposition == MCE_DISPOSITION_RECOVERED ?
               "Recovered" : "Not recovered");
 
-       if (user_mode) {
+       if (in_guest) {
+               printk("%s Guest NIP: %016llx\n", level, evt->srr0);
+       } else if (user_mode) {
                printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
                       evt->srr0, current->pid, current->comm);
        } else {

@@ -195,6 +195,13 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+{
+       /* might as well deliver this straight away */
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
        /* might as well deliver this straight away */

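This helper is consumed by the HV exit path later in this merge. A hedged usage sketch follows: kernel context assumed, the SRR1 mask and RESUME_GUEST come from the kvmppc_handle_exit_hv() hunk below, and the statement that the flags end up in the guest's SRR1 describes how interrupt injection is generally expected to behave here, not something this diff itself shows.

    /* Usage sketch only: mirrors the fallback added below for guests
     * that have not negotiated FWNMI. */
    static int deliver_mce_to_guest(struct kvm_vcpu *vcpu)
    {
            /* 0x083c0000 masks the machine-check cause bits of SRR1;
             * they travel as "flags" and should reappear in the
             * guest's SRR1 when the 0x200 interrupt is delivered. */
            ulong flags = vcpu->arch.shregs.msr & 0x083c0000;

            kvmppc_core_queue_machine_check(vcpu, flags);
            return RESUME_GUEST;
    }
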
@@ -1217,6 +1217,22 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
+               /* Print the MCE event to host console. */
+               machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
+
+               /*
+                * If the guest can do FWNMI, exit to userspace so it can
+                * deliver a FWNMI to the guest.
+                * Otherwise we synthesize a machine check for the guest
+                * so that it knows that the machine check occurred.
+                */
+               if (!vcpu->kvm->arch.fwnmi_enabled) {
+                       ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+                       kvmppc_core_queue_machine_check(vcpu, flags);
+                       r = RESUME_GUEST;
+                       break;
+               }
+
                /* Exit to guest with KVM_EXIT_NMI as exit reason */
                run->exit_reason = KVM_EXIT_NMI;
                run->hw.hardware_exit_reason = vcpu->arch.trap;

@@ -1229,8 +1245,6 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
 
                r = RESUME_HOST;
-               /* Print the MCE event to host console. */
-               machine_check_print_event_info(&vcpu->arch.mce_evt, false);
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        {

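When fwnmi_enabled is set, the exit above reaches userspace as KVM_EXIT_NMI with the disposition in run->flags. A hedged sketch of how a VMM might consume it; this is not QEMU's actual code, and the RTAS error-log step is only stubbed:

    #include <linux/kvm.h>

    /* VMM-side sketch: react to the NMI exit produced by
     * kvmppc_handle_exit_hv() above. */
    static void handle_vcpu_exit(struct kvm_run *run)
    {
            if (run->exit_reason == KVM_EXIT_NMI) {
                    /* disposition bits set by KVM (see the hunk above) */
                    if (run->flags & KVM_RUN_PPC_NMI_DISP_NOT_RECOV) {
                            /* mark the error fatal in the RTAS log
                             * (VMM-specific, stubbed here) */
                    }
                    /* build the RTAS error log and deliver an FWNMI
                     * to the guest's registered handler */
            }
    }
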
@@ -1394,7 +1408,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                /* Pass the machine check to the L1 guest */
                r = RESUME_HOST;
                /* Print the MCE event to host console. */
-               machine_check_print_event_info(&vcpu->arch.mce_evt, false);
+               machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
                break;
        /*
         * We get these next two if the guest accesses a page which it thinks

@@ -3457,6 +3471,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
        unsigned long host_dscr = mfspr(SPRN_DSCR);
        unsigned long host_tidr = mfspr(SPRN_TIDR);
        unsigned long host_iamr = mfspr(SPRN_IAMR);
+       unsigned long host_amr = mfspr(SPRN_AMR);
        s64 dec;
        u64 tb;
        int trap, save_pmu;

@@ -3573,13 +3588,15 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        mtspr(SPRN_PSPB, 0);
        mtspr(SPRN_WORT, 0);
-       mtspr(SPRN_AMR, 0);
        mtspr(SPRN_UAMOR, 0);
        mtspr(SPRN_DSCR, host_dscr);
        mtspr(SPRN_TIDR, host_tidr);
        mtspr(SPRN_IAMR, host_iamr);
        mtspr(SPRN_PSPB, 0);
 
+       if (host_amr != vcpu->arch.amr)
+               mtspr(SPRN_AMR, host_amr);
+
        msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
        store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC

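The guarded mtspr is a write-only-if-changed pattern: skip the SPR write when the register already holds the wanted value, on the assumption (ours, not stated in the diff) that SPR writes are comparatively expensive. A generic hedged sketch:

    /* Generic sketch of the pattern used above for AMR. */
    static inline void restore_host_amr(unsigned long host_amr,
                                        unsigned long guest_amr)
    {
            if (host_amr != guest_amr)      /* only touch the SPR when
                                               the value differs */
                    mtspr(SPRN_AMR, host_amr);
    }
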
@@ -66,10 +66,8 @@ static void reload_slb(struct kvm_vcpu *vcpu)
 /*
  * On POWER7, see if we can handle a machine check that occurred inside
  * the guest in real mode, without switching to the host partition.
- *
- * Returns: 0 => exit guest, 1 => deliver machine check to guest
  */
-static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
        unsigned long srr1 = vcpu->arch.shregs.msr;
        struct machine_check_event mce_evt;

@@ -111,52 +109,24 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
        }
 
        /*
-        * See if we have already handled the condition in the linux host.
-        * We assume that if the condition is recovered then linux host
-        * will have generated an error log event that we will pick
-        * up and log later.
-        * Don't release mce event now. We will queue up the event so that
-        * we can log the MCE event info on host console.
+        * Now get the event and stash it in the vcpu struct so it can
+        * be handled by the primary thread in virtual mode. We can't
+        * call machine_check_queue_event() here if we are running on
+        * an offline secondary thread.
         */
-       if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
-               goto out;
+       if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+               if (handled && mce_evt.version == MCE_V1)
+                       mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
+       } else {
+               memset(&mce_evt, 0, sizeof(mce_evt));
+       }
 
-       if (mce_evt.version == MCE_V1 &&
-           (mce_evt.severity == MCE_SEV_NO_ERROR ||
-            mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
-               handled = 1;
-
-out:
-       /*
-        * For guest that supports FWNMI capability, hook the MCE event into
-        * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI
-        * exit reason. On our way to exit we will pull this event from vcpu
-        * structure and print it from thread 0 of the core/subcore.
-        *
-        * For guest that does not support FWNMI capability (old QEMU):
-        * We are now going enter guest either through machine check
-        * interrupt (for unhandled errors) or will continue from
-        * current HSRR0 (for handled errors) in guest. Hence
-        * queue up the event so that we can log it from host console later.
-        */
-       if (vcpu->kvm->arch.fwnmi_enabled) {
-               /*
-                * Hook up the mce event on to vcpu structure.
-                * First clear the old event.
-                */
-               memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
-               if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
-                       vcpu->arch.mce_evt = mce_evt;
-               }
-       } else
-               machine_check_queue_event();
-
-       return handled;
+       vcpu->arch.mce_evt = mce_evt;
 }
 
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
 {
-       return kvmppc_realmode_mc_power7(vcpu);
+       kvmppc_realmode_mc_power7(vcpu);
 }
 
 /* Check if dynamic split is in force and return subcore size accordingly. */

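For context, handled is set in the earlier, unshown part of kvmppc_realmode_mc_power7() when real-mode recovery already fixed the problem, for example by flushing and reloading the SLB (flush_and_reload_slb() is declared in the mce.h hunk above). A hedged sketch of that earlier shape; the SRR1 mask name below is hypothetical:

    /* Hedged sketch, not the actual function body. */
    static long try_realmode_recovery(unsigned long srr1)
    {
            long handled = 0;

            if (srr1 & SRR1_MC_SLB_ERROR) { /* hypothetical mask name */
                    flush_and_reload_slb(); /* declared in mce.h above */
                    handled = 1;
            }
            return handled;                 /* feeds the disposition
                                               fixup added above */
    }
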
@@ -58,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_DAWR                (SFS-56)
 #define STACK_SLOT_DAWRX       (SFS-64)
 #define STACK_SLOT_HFSCR       (SFS-72)
+#define STACK_SLOT_AMR         (SFS-80)
+#define STACK_SLOT_UAMOR       (SFS-88)
 /* the following is used by the P9 short path */
 #define STACK_SLOT_NVGPRS      (SFS-152)       /* 18 gprs */

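The STACK_SLOT_* values are byte offsets carved downward from the frame size SFS in 8-byte steps, so the two new slots simply extend the existing sequence. An illustrative compile-time check, with a made-up SFS since the real value lives elsewhere in this file:

    /* Illustrative only: re-derive the slot spacing with a made-up
     * frame size so the arithmetic can be checked in isolation. */
    #define SFS                  160        /* assumed for the sketch */
    #define STACK_SLOT_HFSCR     (SFS-72)
    #define STACK_SLOT_AMR       (SFS-80)
    #define STACK_SLOT_UAMOR     (SFS-88)

    /* each slot holds one 64-bit register and must not overlap */
    _Static_assert(STACK_SLOT_AMR == STACK_SLOT_HFSCR - 8, "AMR slot");
    _Static_assert(STACK_SLOT_UAMOR == STACK_SLOT_AMR - 8, "UAMOR slot");
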
@@ -726,11 +728,9 @@ BEGIN_FTR_SECTION
        mfspr   r5, SPRN_TIDR
        mfspr   r6, SPRN_PSSCR
        mfspr   r7, SPRN_PID
-       mfspr   r8, SPRN_IAMR
        std     r5, STACK_SLOT_TID(r1)
        std     r6, STACK_SLOT_PSSCR(r1)
        std     r7, STACK_SLOT_PID(r1)
-       std     r8, STACK_SLOT_IAMR(r1)
        mfspr   r5, SPRN_HFSCR
        std     r5, STACK_SLOT_HFSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

@@ -738,11 +738,18 @@ BEGIN_FTR_SECTION
        mfspr   r5, SPRN_CIABR
        mfspr   r6, SPRN_DAWR
        mfspr   r7, SPRN_DAWRX
+       mfspr   r8, SPRN_IAMR
        std     r5, STACK_SLOT_CIABR(r1)
        std     r6, STACK_SLOT_DAWR(r1)
        std     r7, STACK_SLOT_DAWRX(r1)
+       std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+       mfspr   r5, SPRN_AMR
+       std     r5, STACK_SLOT_AMR(r1)
+       mfspr   r6, SPRN_UAMOR
+       std     r6, STACK_SLOT_UAMOR(r1)
 
 BEGIN_FTR_SECTION
        /* Set partition DABR */
        /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */

@@ -1631,22 +1638,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
        mtspr   SPRN_PSPB, r0
        mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
-       mtspr   SPRN_IAMR, r0
        mtspr   SPRN_TCSCR, r0
        /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
        li      r0, 1
        sldi    r0, r0, 31
        mtspr   SPRN_MMCRS, r0
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-8:
 
-       /* Save and reset AMR and UAMOR before turning on the MMU */
+       /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
+       ld      r8, STACK_SLOT_IAMR(r1)
+       mtspr   SPRN_IAMR, r8
+
+8:     /* Power7 jumps back in here */
        mfspr   r5,SPRN_AMR
        mfspr   r6,SPRN_UAMOR
        std     r5,VCPU_AMR(r9)
        std     r6,VCPU_UAMOR(r9)
-       li      r6,0
-       mtspr   SPRN_AMR,r6
+       ld      r5,STACK_SLOT_AMR(r1)
+       ld      r6,STACK_SLOT_UAMOR(r1)
+       mtspr   SPRN_AMR, r5
        mtspr   SPRN_UAMOR, r6
 
        /* Switch DSCR back to host value */

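Taken together with the entry-side hunk at -738 above, the assembly now implements the protocol below, rendered as hedged C pseudocode; struct host_slots is a hypothetical stand-in for the STACK_SLOT_* save area:

    struct host_slots { unsigned long iamr, amr, uamor; }; /* hypothetical */

    /* guest entry (@@ -738 hunk): stash the host's values */
    static void entry_save_host(struct host_slots *s)
    {
            s->iamr  = mfspr(SPRN_IAMR);
            s->amr   = mfspr(SPRN_AMR);
            s->uamor = mfspr(SPRN_UAMOR);
    }

    /* guest exit (@@ -1631 hunk): keep the guest's values for the next
     * entry, then restore the host's instead of zeroing the SPRs */
    static void exit_restore_host(struct kvm_vcpu *vcpu,
                                  const struct host_slots *s)
    {
            vcpu->arch.amr   = mfspr(SPRN_AMR);   /* VCPU_AMR(r9) */
            vcpu->arch.uamor = mfspr(SPRN_UAMOR); /* VCPU_UAMOR(r9) */
            mtspr(SPRN_IAMR, s->iamr);
            mtspr(SPRN_AMR, s->amr);
            mtspr(SPRN_UAMOR, s->uamor);
    }
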
@@ -1746,11 +1756,9 @@ BEGIN_FTR_SECTION
        ld      r5, STACK_SLOT_TID(r1)
        ld      r6, STACK_SLOT_PSSCR(r1)
        ld      r7, STACK_SLOT_PID(r1)
-       ld      r8, STACK_SLOT_IAMR(r1)
        mtspr   SPRN_TIDR, r5
        mtspr   SPRN_PSSCR, r6
        mtspr   SPRN_PID, r7
-       mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 #ifdef CONFIG_PPC_RADIX_MMU

@@ -2836,49 +2844,15 @@ kvm_cede_exit:
 #endif /* CONFIG_KVM_XICS */
 3:     b       guest_exit_cont
 
-       /* Try to handle a machine check in real mode */
+       /* Try to do machine check recovery in real mode */
 machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
        bl      kvmppc_realmode_machine_check
        nop
+       /* all machine checks go to virtual mode for further handling */
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-       /*
-        * For the guest that is FWNMI capable, deliver all the MCE errors
-        * (handled/unhandled) by exiting the guest with KVM_EXIT_NMI exit
-        * reason. This new approach injects machine check errors in guest
-        * address space to guest with additional information in the form
-        * of RTAS event, thus enabling guest kernel to suitably handle
-        * such errors.
-        *
-        * For the guest that is not FWNMI capable (old QEMU) fallback
-        * to old behaviour for backward compatibility:
-        * Deliver unhandled/fatal (e.g. UE) MCE errors to guest either
-        * through machine check interrupt (set HSRR0 to 0x200).
-        * For handled errors (no-fatal), just go back to guest execution
-        * with current HSRR0.
-        * if we receive machine check with MSR(RI=0) then deliver it to
-        * guest as machine check causing guest to crash.
-        */
-       ld      r11, VCPU_MSR(r9)
-       rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
-       bne     guest_exit_cont         /* if so, exit to host */
-       /* Check if guest is capable of handling NMI exit */
-       ld      r10, VCPU_KVM(r9)
-       lbz     r10, KVM_FWNMI(r10)
-       cmpdi   r10, 1                  /* FWNMI capable? */
-       beq     guest_exit_cont         /* if so, exit with KVM_EXIT_NMI. */
-
-       /* if not, fall through for backward compatibility. */
-       andi.   r10, r11, MSR_RI        /* check for unrecoverable exception */
-       beq     1f                      /* Deliver a machine check to guest */
-       ld      r10, VCPU_PC(r9)
-       cmpdi   r3, 0                   /* Did we handle MCE ? */
-       bne     2f                      /* Continue guest execution. */
-       /* If not, deliver a machine check. SRR0/1 are already set */
-1:     li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-       bl      kvmppc_msr_interrupt
-2:     b       fast_interrupt_c_return
+       b       guest_exit_cont
 
 /*
  * Call C code to handle a HMI in real mode.

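The net effect of this hunk is that no recovery policy remains in real mode: every machine check becomes an exit to virtual mode. A hedged C rendering of the surviving stub; the real code is the assembly above:

    /* Hedged C rendering of machine_check_realmode after this change. */
    static int machine_check_realmode_c(struct kvm_vcpu *vcpu)
    {
            kvmppc_realmode_machine_check(vcpu);    /* stash MCE event */

            /* all machine checks now go to virtual mode, where
             * kvmppc_handle_exit_hv() (earlier hunk) applies the
             * FWNMI policy */
            return BOOK3S_INTERRUPT_MACHINE_CHECK;  /* becomes r12 */
    }
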
@@ -587,7 +587,7 @@ int opal_machine_check(struct pt_regs *regs)
                       evt.version);
                return 0;
        }
-       machine_check_print_event_info(&evt, user_mode(regs));
+       machine_check_print_event_info(&evt, user_mode(regs), false);
 
        if (opal_recover_mce(regs, &evt))
                return 1;