
Commit 8b35c359 authored by Linus Torvalds
Pull kvm bugfixes from Gleb Natapov:
 "The bulk of the fixes is in MIPS KVM kernel<->userspace ABI.  MIPS KVM
  is new for 3.10 and some problems were found with current ABI.  It is
  better to fix them now and do not have a kernel with broken one"

* 'fixes' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Fix race in apic->pending_events processing
  KVM: fix sil/dil/bpl/spl in the mod/rm fields
  KVM: Emulate multibyte NOP
  ARM: KVM: be more thorough when invalidating TLBs
  ARM: KVM: prevent NULL pointer dereferences with KVM VCPU ioctl
  mips/kvm: Use ENOIOCTLCMD to indicate unimplemented ioctls.
  mips/kvm: Fix ABI by moving manipulation of CP0 registers to KVM_{G,S}ET_ONE_REG
  mips/kvm: Use ARRAY_SIZE() instead of hardcoded constants in kvm_arch_vcpu_ioctl_{s,g}et_regs
  mips/kvm: Fix name of gpr field in struct kvm_regs.
  mips/kvm: Fix ABI for use of 64-bit registers.
  mips/kvm: Fix ABI for use of FPU.
parents 6f66f900 299018f4
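
The headline change in this pull is the MIPS kernel<->userspace ABI: guest register state is now accessed through KVM_{G,S}ET_ONE_REG rather than fixed structures. A minimal userspace sketch of the new interface, assuming a vcpu fd obtained from KVM_CREATE_VCPU; the helper name and error handling are illustrative, while the ioctl, struct kvm_one_reg, and the KVM_REG_MIPS_PC id come from this series.

/*
 * Hedged sketch, not from the commit: reading the guest PC through the
 * KVM_GET_ONE_REG interface introduced for MIPS by this series.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define KVM_REG_MIPS_PC		34	/* id from the MIPS uapi header in this commit */

static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)pc,	/* must point to an aligned 64-bit location */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
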
+13 −2
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
	wait_event_interruptible(*wq, !vcpu->arch.pause);
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	int ret;
	sigset_t sigsaved;

	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
	if (unlikely(vcpu->arch.target < 0))
	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		struct kvm_reg_list reg_list;
		unsigned n;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
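
The arm.c change above closes a NULL pointer dereference: the register-access ioctls and KVM_RUN now fail with ENOEXEC until the VCPU has been initialized with KVM_ARM_VCPU_INIT. A hedged sketch of the ordering userspace must follow; the target choice and error handling are assumptions for illustration, not part of the patch.

/*
 * Hedged sketch: KVM_GET/SET_ONE_REG, KVM_GET_REG_LIST and KVM_RUN on an
 * ARM VCPU return -ENOEXEC until the VCPU has been initialized. The target
 * below is only an example.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_then_run(int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	init.target = KVM_ARM_TARGET_CORTEX_A15;	/* example target */

	/* Without this call, the guarded ioctls below fail with ENOEXEC. */
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
		return -1;

	return ioctl(vcpu_fd, KVM_RUN, 0);
}
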
+26 −15
@@ -43,6 +43,13 @@ static phys_addr_t hyp_idmap_vector;

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
	return p;
}

static void clear_pud_entry(pud_t *pud)
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	}
}

@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
	return page_count(pte_page) == 1;
}

static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		clear_pte_entry(kvm, pte, addr);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			clear_pmd_entry(kvm, pmd, addr);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				clear_pud_entry(kvm, pud, addr);
				range = PUD_SIZE;
			}
		}
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm->arch.pgd, start, size);
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
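
The mmu.c changes thread the struct kvm pointer and the affected IPA down to each clear_*_entry() so the matching TLB entry is invalidated the moment a table entry is torn down, rather than relying on a single flush at the end of the unmap path; the hyp and boot page tables pass NULL and are left alone. A simplified sketch of the resulting pattern, not the actual kernel code:

/*
 * Simplified sketch: every page-table entry that is torn down is followed
 * immediately by a TLB invalidation for that IPA, and kvm_tlb_flush_vmid_ipa()
 * is a no-op for the hyp/boot tables, which pass kvm == NULL.
 */
static void teardown_pte(struct kvm *kvm, pte_t *pte, phys_addr_t ipa)
{
	kvm_set_pte(pte, __pte(0));		/* clear the mapping */
	put_page(virt_to_page(pte));		/* drop the table page reference */
	kvm_tlb_flush_vmid_ipa(kvm, ipa);	/* invalidate that IPA right away */
}
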
+0 −4
@@ -496,10 +496,6 @@ struct kvm_mips_callbacks {
			    uint32_t cause);
	int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
			  uint32_t cause);
	int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
				    struct kvm_regs *regs);
	int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
				    struct kvm_regs *regs);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+110 −27
@@ -4,6 +4,7 @@
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2013 Cavium, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

@@ -12,44 +13,126 @@

#include <linux/types.h>

#define __KVM_MIPS

#define N_MIPS_COPROC_REGS      32
#define N_MIPS_COPROC_SEL   	8
/*
 * KVM MIPS specific structures and definitions.
 *
 * Some parts derived from the x86 version of this file.
 */

/* for KVM_GET_REGS and KVM_SET_REGS */
/*
 * for KVM_GET_REGS and KVM_SET_REGS
 *
 * If Config[AT] is zero (32-bit CPU), the register contents are
 * stored in the lower 32-bits of the struct kvm_regs fields and sign
 * extended to 64-bits.
 */
struct kvm_regs {
	__u32 gprs[32];
	__u32 hi;
	__u32 lo;
	__u32 pc;

	__u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
	__u64 gpr[32];
	__u64 hi;
	__u64 lo;
	__u64 pc;
};

/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
};

/* for KVM_GET_FPU and KVM_SET_FPU */
/*
 * for KVM_GET_FPU and KVM_SET_FPU
 *
 * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
 * are zero filled.
 */
struct kvm_fpu {
	__u64 fpr[32];
	__u32 fir;
	__u32 fccr;
	__u32 fexr;
	__u32 fenr;
	__u32 fcsr;
	__u32 pad;
};


/*
 * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
 * registers.  The id field is broken down as follows:
 *
 *  bits[2..0]   - Register 'sel' index.
 *  bits[7..3]   - Register 'rd'  index.
 *  bits[15..8]  - Must be zero.
 *  bits[63..16] - 1 -> CP0 registers.
 *
 * Other sets registers may be added in the future.  Each set would
 * have its own identifier in bits[63..16].
 *
 * The addr field of struct kvm_one_reg must point to an aligned
 * 64-bit wide location.  For registers that are narrower than
 * 64-bits, the value is stored in the low order bits of the location,
 * and sign extended to 64-bits.
 *
 * The registers defined in struct kvm_regs are also accessible, the
 * id values for these are below.
 */

#define KVM_REG_MIPS_R0 0
#define KVM_REG_MIPS_R1 1
#define KVM_REG_MIPS_R2 2
#define KVM_REG_MIPS_R3 3
#define KVM_REG_MIPS_R4 4
#define KVM_REG_MIPS_R5 5
#define KVM_REG_MIPS_R6 6
#define KVM_REG_MIPS_R7 7
#define KVM_REG_MIPS_R8 8
#define KVM_REG_MIPS_R9 9
#define KVM_REG_MIPS_R10 10
#define KVM_REG_MIPS_R11 11
#define KVM_REG_MIPS_R12 12
#define KVM_REG_MIPS_R13 13
#define KVM_REG_MIPS_R14 14
#define KVM_REG_MIPS_R15 15
#define KVM_REG_MIPS_R16 16
#define KVM_REG_MIPS_R17 17
#define KVM_REG_MIPS_R18 18
#define KVM_REG_MIPS_R19 19
#define KVM_REG_MIPS_R20 20
#define KVM_REG_MIPS_R21 21
#define KVM_REG_MIPS_R22 22
#define KVM_REG_MIPS_R23 23
#define KVM_REG_MIPS_R24 24
#define KVM_REG_MIPS_R25 25
#define KVM_REG_MIPS_R26 26
#define KVM_REG_MIPS_R27 27
#define KVM_REG_MIPS_R28 28
#define KVM_REG_MIPS_R29 29
#define KVM_REG_MIPS_R30 30
#define KVM_REG_MIPS_R31 31

#define KVM_REG_MIPS_HI 32
#define KVM_REG_MIPS_LO 33
#define KVM_REG_MIPS_PC 34

/*
 * KVM MIPS specific structures and definitions
 *
 */
struct kvm_debug_exit_arch {
	__u64 epc;
};

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
};

/* definition of registers in kvm_run */
struct kvm_sync_regs {
};

/* dummy definition */
struct kvm_sregs {
};

struct kvm_mips_interrupt {
	/* in */
	__u32 cpu;
	__u32 irq;
};

/* definition of registers in kvm_run */
struct kvm_sync_regs {
};

#endif /* __LINUX_KVM_MIPS_H */
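
The id layout documented in the header above (bits[2..0] = sel, bits[7..3] = rd, bits[63..16] = 1 for the CP0 set) is what the KVM_REG_MIPS_CP0_* constants in kvm_mips.c below encode. A small sketch of building such an id; the helper name is illustrative and not part of the ABI.

/*
 * Hedged sketch: for Status (rd = 12, sel = 0) this yields 0x10000 + 8 * 12 + 0,
 * matching KVM_REG_MIPS_CP0_STATUS as defined in kvm_mips.c.
 */
#include <stdint.h>

static inline uint64_t mips_cp0_reg_id(unsigned int rd, unsigned int sel)
{
	return (1ULL << 16) |		/* bits[63..16] == 1 selects the CP0 set */
	       ((uint64_t)rd << 3) |	/* bits[7..3]: register 'rd' index       */
	       sel;			/* bits[2..0]: register 'sel' index      */
}
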
+259 −21
@@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
long
kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
	return -ENOIOCTLCMD;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
@@ -401,7 +401,7 @@ int
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
				    struct kvm_guest_debug *dbg)
{
	return -EINVAL;
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -475,15 +475,224 @@ int
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -EINVAL;
	return -ENOIOCTLCMD;
}

int
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

#define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
#define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
#define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
#define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
#define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
#define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
#define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
#define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
#define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
#define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
#define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
#define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
#define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
#define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
#define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
#define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
#define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
#define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
#define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
#define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	/* EPC set via kvm_regs, et al. */
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC
};

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	u64 __user *uaddr = (u64 __user *)(long)reg->addr;

	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	default:
		return -EINVAL;
	}
	return put_user(v, uaddr);
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	u64 __user *uaddr = (u64 __user *)(long)reg->addr;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if (get_user(v, uaddr) != 0)
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		kvm_write_c0_guest_cause(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

long
kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
@@ -491,9 +700,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;
	int intr;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
@@ -505,8 +743,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			intr = (int)irq.irq;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

@@ -514,7 +750,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
			break;
		}
	default:
		r = -EINVAL;
		r = -ENOIOCTLCMD;
	}

out:
@@ -565,7 +801,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)

	switch (ioctl) {
	default:
		r = -EINVAL;
		r = -ENOIOCTLCMD;
	}

	return r;
@@ -593,13 +829,13 @@ void kvm_arch_exit(void)
int
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
	return -ENOIOCTLCMD;
}

int
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -609,12 +845,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
@@ -627,6 +863,9 @@ int kvm_dev_ioctl_check_extension(long ext)
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
@@ -635,7 +874,6 @@ int kvm_dev_ioctl_check_extension(long ext)
		break;
	}
	return r;

}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -677,28 +915,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < 32; i++)
		vcpu->arch.gprs[i] = regs->gprs[i];

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < 32; i++)
		regs->gprs[i] = vcpu->arch.gprs[i];
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
	return 0;
}

void kvm_mips_comparecount_func(unsigned long data)
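
The KVM_GET_REG_LIST handler added above follows the usual two-call protocol: userspace passes its capacity in n, the kernel rewrites n with the real count and returns E2BIG if the buffer was too small, otherwise it copies the register ids. A hedged userspace sketch of that protocol; allocation layout and error handling are illustrative, not from the commit.

/*
 * Hedged sketch: probe with n = 0 to learn the count, then allocate and
 * fetch the full list of register ids.
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call: capacity 0, so the kernel reports the count via E2BIG. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return NULL;

	list = malloc(sizeof(*list) + probe.n * sizeof(list->reg[0]));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;		/* list->reg[0..n-1] now holds the register ids */
}
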