Merge branch 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: Fix is_empty_shadow_page() check
  KVM: MMU: Fix printk() format string
  KVM: IOAPIC: only set remote_irr if interrupt was injected
  KVM: MMU: reschedule during shadow teardown
  KVM: VMX: Clear CR4.VMXE in hardware_disable
  KVM: migrate PIT timer
  KVM: ppc: Report bad GFNs
  KVM: ppc: Use a read lock around MMU operations, and release it on error
  KVM: ppc: Remove unmatched kunmap() call
  KVM: ppc: add lwzx/stwx emulation
  KVM: ppc: Remove duplicate function
  KVM: s390: Fix race condition in kvm_s390_handle_wait
  KVM: s390: Send program check on access error
  KVM: s390: fix interrupt delivery
  KVM: s390: handle machine checks when guest is running
  KVM: s390: fix locking order problem in enable_sie
  KVM: s390: use yield instead of schedule to implement diag 0x44
  KVM: x86 emulator: fix hypercall return value on AMD
  KVM: ia64: fix zero extending for mmio ld1/2/4 emulation in KVM
commit a4df1ac12d
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 	if (p->u.ioreq.state == STATE_IORESP_READY) {
 		if (dir == IOREQ_READ)
-			*dest = p->u.ioreq.data;
+			/* it's necessary to ensure zero extending */
+			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
 	} else
 		panic_vm(vcpu);
 out:
 
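The mask arithmetic above is worth spelling out: for an access of s bytes, ~0UL >> (64-(s*8)) keeps the low s*8 bits and clears the rest (on ia64, unsigned long is 64 bits), so a 1/2/4-byte MMIO read cannot leak stale high bits into the 64-bit destination. A minimal standalone sketch, with an invented helper name and ~0ULL used so it also behaves on 32-bit hosts:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the masking done in mmio_access():
     * zero-extend an s-byte value into 64 bits. Valid for s = 1..8;
     * s = 0 would shift by 64, which is undefined behavior, so callers
     * must pass a real access size. */
    static uint64_t zero_extend(uint64_t data, int s)
    {
        return data & (~0ULL >> (64 - (s * 8)));
    }

    int main(void)
    {
        /* a value read as 1/2/4 bytes must not keep its high bits */
        uint64_t raw = 0xffffffffffffff80ULL;

        printf("s=1: %#llx\n", (unsigned long long)zero_extend(raw, 1)); /* 0x80 */
        printf("s=2: %#llx\n", (unsigned long long)zero_extend(raw, 2)); /* 0xff80 */
        printf("s=4: %#llx\n", (unsigned long long)zero_extend(raw, 4)); /* 0xffffff80 */
        return 0;
    }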
@@ -116,8 +116,6 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
 	struct page *page = vcpu->arch.shadow_pages[index];
 
-	kunmap(vcpu->arch.shadow_pages[index]);
-
 	if (get_tlb_v(stlbe)) {
 		if (kvmppc_44x_tlbe_is_writable(stlbe))
 			kvm_release_page_dirty(page);
@@ -144,18 +142,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe = &vcpu->arch.shadow_tlb[victim];
 
 	/* Get reference to new page. */
-	down_write(&current->mm->mmap_sem);
+	down_read(&current->mm->mmap_sem);
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
-		printk(KERN_ERR "Couldn't get guest page!\n");
+		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 		kvm_release_page_clean(new_page);
+		up_read(&current->mm->mmap_sem);
 		return;
 	}
 	hpaddr = page_to_phys(new_page);
 
 	/* Drop reference to old page. */
 	kvmppc_44x_shadow_release(vcpu, victim);
-	up_write(&current->mm->mmap_sem);
+	up_read(&current->mm->mmap_sem);
 
 	vcpu->arch.shadow_pages[victim] = new_page;
 
@@ -227,39 +227,6 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
 	}
 }
 
-static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-	enum emulation_result er;
-	int r;
-
-	er = kvmppc_emulate_instruction(run, vcpu);
-	switch (er) {
-	case EMULATE_DONE:
-		/* Future optimization: only reload non-volatiles if they were
-		 * actually modified. */
-		r = RESUME_GUEST_NV;
-		break;
-	case EMULATE_DO_MMIO:
-		run->exit_reason = KVM_EXIT_MMIO;
-		/* We must reload nonvolatiles because "update" load/store
-		 * instructions modify register state. */
-		/* Future optimization: only reload non-volatiles if they were
-		 * actually modified. */
-		r = RESUME_HOST_NV;
-		break;
-	case EMULATE_FAIL:
-		/* XXX Deliver Program interrupt to guest. */
-		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       vcpu->arch.last_inst);
-		r = RESUME_HOST;
-		break;
-	default:
-		BUG();
-	}
-
-	return r;
-}
-
 /**
  * kvmppc_handle_exit
  *
@@ -246,6 +246,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case 31:
 		switch (get_xop(inst)) {
 
+		case 23: /* lwzx */
+			rt = get_rt(inst);
+			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+			break;
+
 		case 83: /* mfmsr */
 			rt = get_rt(inst);
 			vcpu->arch.gpr[rt] = vcpu->arch.msr;
@@ -267,6 +272,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
 			break;
 
+		case 151: /* stwx */
+			rs = get_rs(inst);
+			emulated = kvmppc_handle_store(run, vcpu,
+			                               vcpu->arch.gpr[rs],
+			                               4, 1);
+			break;
+
 		case 163: /* wrteei */
 			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
 			                 | (inst & MSR_EE);
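For reference, the case numbers matched above come from the PowerPC X-form layout: the primary opcode sits in bits 0-5 of the big-endian word and the extended opcode (xop) in bits 21-30, which is what get_op()/get_xop() extract; lwzx is opcode 31/xop 23 and stwx is 31/151. A small sketch of that decode (the helpers mirror the kernel's, the encoded word is an example):

    #include <stdio.h>
    #include <stdint.h>

    /* Field extraction for X-form instructions, mirroring the
     * get_op()/get_xop()/get_rt() helpers used by
     * kvmppc_emulate_instruction(): primary opcode in bits 0-5,
     * RT/RS in bits 6-10, extended opcode in bits 21-30. */
    static inline int get_op(uint32_t inst)  { return inst >> 26; }
    static inline int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
    static inline int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }

    int main(void)
    {
        uint32_t lwzx = 0x7c64282e; /* lwzx r3, r4, r5 */

        /* prints op=31 xop=23 rt=3 for the encoding above */
        printf("op=%d xop=%d rt=%d\n", get_op(lwzx), get_xop(lwzx), get_rt(lwzx));
        return 0;
    }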
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
 	vcpu_put(vcpu);
-	schedule();
+	yield();
 	vcpu_load(vcpu);
 	return 0;
 }
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_interrupt(vcpu))
 		return 0;
 
+	__set_cpu_idle(vcpu);
+	spin_lock_bh(&vcpu->arch.local_int.lock);
+	vcpu->arch.local_int.timer_due = 0;
+	spin_unlock_bh(&vcpu->arch.local_int.lock);
+
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
 		__unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	__set_cpu_idle(vcpu);
-	vcpu->arch.local_int.timer_due = 0;
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 	       list_empty(&vcpu->arch.local_int.float_int->list) &&
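Taken together, the two handle_wait hunks move the idle-state bookkeeping (__set_cpu_idle(), clearing timer_due) to before the wait-condition checks, closing the window in which a wakeup could arrive and find the vcpu not yet marked idle. The underlying rule is the usual sleep/wakeup one: publish "I am going to sleep" before the final condition check. A condensed pthread illustration of that ordering, all names hypothetical:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool idle = false;     /* stand-in for the cpu-idle bit */
    static bool work_due = false; /* stand-in for timer_due / pending irq */

    static void vcpu_wait(void)
    {
        pthread_mutex_lock(&lock);
        idle = true;              /* publish the idle state first ... */
        while (!work_due)         /* ... then do the final condition check */
            pthread_cond_wait(&cond, &lock);
        idle = false;
        work_due = false;
        pthread_mutex_unlock(&lock);
    }

    static void *waker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        work_due = true;          /* a wakeup arriving at any time ... */
        if (idle)                 /* ... sees a consistent idle flag */
            pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, waker, NULL);
        vcpu_wait();
        pthread_join(t, NULL);
        printf("woken\n");
        return 0;
    }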
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 		return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (need_resched())
 		schedule();
 
+	if (test_thread_flag(TIF_MCCK_PENDING))
+		s390_handle_mcck();
+
+	kvm_s390_deliver_pending_interrupts(vcpu);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	local_irq_disable();
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	might_sleep();
 
 	do {
-		kvm_s390_deliver_pending_interrupts(vcpu);
 		__vcpu_run(vcpu);
 		rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
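The reworked s390_enable_sie() is an instance of a general pattern: the allocation (dup_mm() may sleep) cannot happen under task_lock(), so the lock is dropped for it and the "am I still alone, no ptrace, no threads" condition is re-checked after the lock is retaken, discarding the new mm if the world changed in between. A minimal userspace sketch of the same check/allocate/recheck shape, pthread-based and with all names hypothetical:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int users = 1;          /* stand-in for mm_users */
    static void *resource = NULL;  /* stand-in for the pgstes-enabled mm */

    int enable_feature(void)
    {
        void *new;

        /* fast path: already enabled */
        if (resource)
            return 0;

        /* check the preconditions under the lock */
        pthread_mutex_lock(&lock);
        if (users > 1) {
            pthread_mutex_unlock(&lock);
            return -1;
        }
        pthread_mutex_unlock(&lock);

        /* allocate with the lock dropped (dup_mm() can sleep) */
        new = malloc(128);
        if (!new)
            return -1;

        /* recheck: someone may have raced in while we allocated */
        pthread_mutex_lock(&lock);
        if (users > 1) {
            free(new);
            pthread_mutex_unlock(&lock);
            return -1;
        }
        resource = new;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("enable_feature: %d\n", enable_feature());
        return 0;
    }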
@@ -200,7 +200,6 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 
 	atomic_inc(&pt->pending);
 	smp_mb__after_atomic_inc();
-	/* FIXME: handle case where the guest is in guest mode */
 	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
 		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(&vcpu0->wq);
@@ -237,6 +236,19 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	return HRTIMER_NORESTART;
 }
 
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+	struct hrtimer *timer;
+
+	if (vcpu->vcpu_id != 0 || !pit)
+		return;
+
+	timer = &pit->pit_state.pit_timer.timer;
+	if (hrtimer_cancel(timer))
+		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+
 static void destroy_pit_timer(struct kvm_kpit_timer *pt)
 {
 	pr_debug("pit: execute del timer!\n");
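The cancel-then-restart dance in __kvm_migrate_pit_timer() keeps the original deadline: hrtimer_cancel() returns nonzero only if the timer was queued, and re-arming with timer->expires in HRTIMER_MODE_ABS re-queues it on the new CPU's hrtimer base without changing when it fires. The same re-arm-at-the-old-absolute-deadline idea, sketched in userspace with timerfd (Linux-specific; the two-second interval is just for the example):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/timerfd.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        struct itimerspec its = { 0 };
        struct timespec now;
        uint64_t expirations;
        int fd = timerfd_create(CLOCK_MONOTONIC, 0);

        if (fd < 0)
            return 1;

        /* arm an absolute one-shot timer 2s from now */
        clock_gettime(CLOCK_MONOTONIC, &now);
        its.it_value.tv_sec = now.tv_sec + 2;
        its.it_value.tv_nsec = now.tv_nsec;
        timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL);

        /* "migrate": re-arm with the SAME absolute expiry. Because the
         * deadline is absolute, the firing time is unchanged, which is
         * exactly the property the PIT migration relies on. */
        timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL);

        read(fd, &expirations, sizeof(expirations)); /* blocks until expiry */
        printf("fired %llu time(s)\n", (unsigned long long)expirations);
        close(fd);
        return 0;
    }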
@@ -94,3 +94,9 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 	/* TODO: PIT, RTC etc. */
 }
 EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+{
+	__kvm_migrate_apic_timer(vcpu);
+	__kvm_migrate_pit_timer(vcpu);
+}
@@ -84,6 +84,8 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
 
 int pit_has_pending_timer(struct kvm_vcpu *vcpu);
 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
@@ -658,7 +658,7 @@ static int is_empty_shadow_page(u64 *spt)
 	u64 *end;
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-		if (*pos != shadow_trap_nonpresent_pte) {
+		if (is_shadow_present_pte(*pos)) {
 			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
@@ -1858,6 +1858,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
+		cond_resched();
 	}
 	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
@@ -418,7 +418,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	/* mmio */
 	if (is_error_pfn(pfn)) {
-		pgprintk("gfn %x is mmio\n", walker.gfn);
+		pgprintk("gfn %lx is mmio\n", walker.gfn);
 		kvm_release_pfn_clean(pfn);
 		return 1;
 	}
@@ -688,7 +688,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
-		kvm_migrate_apic_timer(vcpu);
+		kvm_migrate_timers(vcpu);
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
@@ -608,7 +608,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
-		kvm_migrate_apic_timer(vcpu);
+		kvm_migrate_timers(vcpu);
 		vpid_sync_vcpu_all(vmx);
 	}
 
@@ -1036,6 +1036,7 @@ static void hardware_enable(void *garbage)
 static void hardware_disable(void *garbage)
 {
 	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
@@ -2758,7 +2758,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (vcpu->requests) {
 		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
-			__kvm_migrate_apic_timer(vcpu);
+			__kvm_migrate_timers(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 				       &vcpu->requests)) {
 			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -1727,7 +1727,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		if (rc)
 			goto done;
 
-		kvm_emulate_hypercall(ctxt->vcpu);
+		/* Let the processor re-execute the fixed hypercall */
+		c->eip = ctxt->vcpu->arch.rip;
 		/* Disable writeback. */
 		c->dst.type = OP_NONE;
 		break;
@@ -207,6 +207,7 @@ s390_handle_mcck(void)
 		do_exit(SIGSEGV);
 	}
 }
+EXPORT_SYMBOL_GPL(s390_handle_mcck);
 
 /*
  * returns 0 if all registers could be validated
@@ -57,6 +57,7 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
                            u64 asid, u32 flags);
@@ -297,7 +297,7 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn)
 	return (gpa_t)gfn << PAGE_SHIFT;
 }
 
-static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
 	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
 }
@@ -45,7 +45,7 @@
 #else
 #define ioapic_debug(fmt, arg...)
 #endif
-static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
 
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
                                           unsigned long addr,
@@ -89,8 +89,8 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 	pent = &ioapic->redirtbl[idx];
 
 	if (!pent->fields.mask) {
-		ioapic_deliver(ioapic, idx);
-		if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
+		int injected = ioapic_deliver(ioapic, idx);
+		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
 			pent->fields.remote_irr = 1;
 	}
 	if (!pent->fields.trig_mode)
@@ -133,7 +133,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
-static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
+static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
 			   struct kvm_vcpu *vcpu,
 			   u8 vector, u8 trig_mode, u8 delivery_mode)
 {
@@ -143,7 +143,7 @@ static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
 	ASSERT((delivery_mode == IOAPIC_FIXED) ||
 	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));
 
-	kvm_apic_set_irq(vcpu, vector, trig_mode);
+	return kvm_apic_set_irq(vcpu, vector, trig_mode);
 }
 
 static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
@@ -186,7 +186,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 	return mask;
 }
 
-static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
 	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
@@ -195,7 +195,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
 	u32 deliver_bitmask;
 	struct kvm_vcpu *vcpu;
-	int vcpu_id;
+	int vcpu_id, r = 0;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
@@ -204,7 +204,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
 	if (!deliver_bitmask) {
 		ioapic_debug("no target on destination\n");
-		return;
+		return 0;
 	}
 
 	switch (delivery_mode) {
@@ -216,7 +216,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 		vcpu = ioapic->kvm->vcpus[0];
 #endif
 		if (vcpu != NULL)
-			ioapic_inj_irq(ioapic, vcpu, vector,
+			r = ioapic_inj_irq(ioapic, vcpu, vector,
 				       trig_mode, delivery_mode);
 		else
 			ioapic_debug("null lowest prio vcpu: "
@@ -234,7 +234,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 			deliver_bitmask &= ~(1 << vcpu_id);
 			vcpu = ioapic->kvm->vcpus[vcpu_id];
 			if (vcpu) {
-				ioapic_inj_irq(ioapic, vcpu, vector,
+				r = ioapic_inj_irq(ioapic, vcpu, vector,
 					       trig_mode, delivery_mode);
 			}
 		}
@@ -246,6 +246,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 			     delivery_mode);
 		break;
 	}
+	return r;
 }
 
 void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
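The net effect of the ioapic hunks: delivery now reports whether an interrupt was actually injected, and remote_irr is set for level-triggered pins only in that case, so a pin whose delivery failed is not left looking in-service. A toy model of the corrected logic, with struct and names hypothetical and the flow condensed from the diff above:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model of one redirection-table entry; field names follow the diff. */
    struct redir_entry {
        bool mask;
        bool level_triggered; /* trig_mode == IOAPIC_LEVEL_TRIG */
        bool remote_irr;
    };

    /* Pretend delivery that can fail, e.g. when there is no target vcpu. */
    static int deliver(bool has_target)
    {
        return has_target ? 1 : 0;
    }

    static void service(struct redir_entry *e, bool has_target)
    {
        if (!e->mask) {
            int injected = deliver(has_target);
            /* the fix: mark in-service only if injection happened */
            if (injected && e->level_triggered)
                e->remote_irr = true;
        }
    }

    int main(void)
    {
        struct redir_entry e = { .mask = false, .level_triggered = true };

        service(&e, false); /* failed delivery must not set remote_irr */
        printf("after failed delivery: remote_irr=%d\n", e.remote_irr);
        service(&e, true);
        printf("after real delivery:   remote_irr=%d\n", e.remote_irr);
        return 0;
    }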