Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d116e812 authored by Deng-Cheng Zhu, committed by Paolo Bonzini
Browse files

MIPS: KVM: Reformat code and comments



No logic changes inside.

Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 85949977
Loading
Loading
Loading
Loading
+7 −3
Original line number Original line Diff line number Diff line
@@ -366,6 +366,10 @@ enum emulation_result {
#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))		\
#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))		\
				 ? ((x).tlb_lo1 & MIPS3_PG_V)		\
				 ? ((x).tlb_lo1 & MIPS3_PG_V)		\
				 : ((x).tlb_lo0 & MIPS3_PG_V))
				 : ((x).tlb_lo0 & MIPS3_PG_V))
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & ASID_MASK))


struct kvm_mips_tlb {
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_mask;
+3 −0
Original line number Original line Diff line number Diff line
@@ -19,6 +19,9 @@
#include <asm/mipsmtregs.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */
#include <asm/uaccess.h> /* for segment_eq() */


extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
/*
 * This macro return a properly sign-extended address suitable as base address
 * This macro return a properly sign-extended address suitable as base address
 * for indexed cache operations.  Two issues here:
 * for indexed cache operations.  Two issues here:
+31 −24
Original line number Original line Diff line number Diff line
@@ -16,7 +16,6 @@
#include <asm/stackframe.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>
#include <asm/asm-offsets.h>



#define _C_LABEL(x)     x
#define _C_LABEL(x)     x
#define MIPSX(name)     mips32_ ## name
#define MIPSX(name)     mips32_ ## name
#define CALLFRAME_SIZ   32
#define CALLFRAME_SIZ   32
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$25, PT_R25(k1)
	LONG_S	$25, PT_R25(k1)


	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
	/*
	 * XXXKYMA k0/k1 not saved, not being used if we got here through
	 * an ioctl()
	 */


	LONG_S	$28, PT_R28(k1)
	LONG_S	$28, PT_R28(k1)
	LONG_S	$29, PT_R29(k1)
	LONG_S	$29, PT_R29(k1)
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run)
	/* Save the kernel gp as well */
	/* Save the kernel gp as well */
	LONG_S	gp, VCPU_HOST_GP(k1)
	LONG_S	gp, VCPU_HOST_GP(k1)


	/* Setup status register for running the guest in UM, interrupts are disabled */
	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
	mtc0	k0, CP0_STATUS
	mtc0	k0, CP0_STATUS
	ehb
	ehb
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run)
	mtc0	k0, CP0_STATUS
	mtc0	k0, CP0_STATUS
	ehb
	ehb



	/* Set Guest EPC */
	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC
	mtc0	t0, CP0_EPC
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1)
	eret
	eret


VECTOR(MIPSX(exception), unknown)
VECTOR(MIPSX(exception), unknown)
/*
/* Find out what mode we came from and jump to the proper handler. */
 * Find out what mode we came from and jump to the proper handler.
 */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	ehb				#02:
	ehb				#02:


@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown)
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#04
	INT_SLL	k0, k0, 10		#04
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000		#06: Exception handler is installed @ offset 0x2000
	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
					#    installed @ offset 0x2000
	j	k0			#07: jump to the function
	j	k0			#07: jump to the function
	 nop				#08: branch delay slot
	 nop				#08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
VECTOR_END(MIPSX(exceptionEnd))
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd))
/*
/*
 * Generic Guest exception handler. We end up here when the guest
 * Generic Guest exception handler. We end up here when the guest
 * does something that causes a trap to kernel mode.
 * does something that causes a trap to kernel mode.
 *
 */
 */
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Get the VCPU pointer from DDTATA_LO */
	/* Get the VCPU pointer from DDTATA_LO */
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$31, VCPU_R31(k1)
	LONG_S	$31, VCPU_R31(k1)


	/* We need to save hi/lo and restore them on
	/* We need to save hi/lo and restore them on the way out */
	 * the way out
	 */
	mfhi	t0
	mfhi	t0
	LONG_S	t0, VCPU_HI(k1)
	LONG_S	t0, VCPU_HI(k1)


@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Save pointer to run in s0, will be saved by the compiler */
	/* Save pointer to run in s0, will be saved by the compiler */
	move	s0, a0
	move	s0, a0


	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
	/*
	 * process the exception */
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
	 * process the exception
	 */
	mfc0	k0,CP0_EPC
	mfc0	k0,CP0_EPC
	LONG_S	k0, VCPU_PC(k1)
	LONG_S	k0, VCPU_PC(k1)


@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	LONG_L	k0, VCPU_HOST_EBASE(k1)
	LONG_L	k0, VCPU_HOST_EBASE(k1)
	mtc0	k0,CP0_EBASE
	mtc0	k0,CP0_EBASE



	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	.set	at
	.set	at
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Saved host state */
	/* Saved host state */
	INT_ADDIU sp, sp, -PT_SIZE
	INT_ADDIU sp, sp, -PT_SIZE


	/* XXXKYMA do we need to load the host ASID, maybe not because the
	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 * kernel entries are marked GLOBAL, need to verify
	 */
	 */


@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)


	/* Jump to handler */
	/* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
FEXPORT(__kvm_mips_jump_to_handler)
	/* XXXKYMA: not sure if this is safe, how large is the stack??
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel */
	 * with this in the kernel
	 */
	PTR_LA	t9, kvm_mips_handle_exit
	PTR_LA	t9, kvm_mips_handle_exit
	jalr.hb	t9
	jalr.hb	t9
	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
	 INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
	di
	di
	ehb
	ehb


	/* XXXKYMA: k0/k1 could have been blown away if we processed
	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 * guest, reload k1
	 */
	 */
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
	move	k1, s1
	move	k1, s1
	INT_ADDIU k1, k1, VCPU_HOST_ARCH
	INT_ADDIU k1, k1, VCPU_HOST_ARCH


	/* Check return value, should tell us if we are returning to the
	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc)or resuming the guest
	 * host (handle I/O etc)or resuming the guest
	 */
	 */
	andi	t0, v0, RESUME_HOST
	andi	t0, v0, RESUME_HOST
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host:
	LONG_L	$0, PT_R0(k1)
	LONG_L	$0, PT_R0(k1)
	LONG_L	$1, PT_R1(k1)
	LONG_L	$1, PT_R1(k1)


	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
	/*
	 * to recover the err code  */
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	INT_SRA	k0, v0, 2
	INT_SRA	k0, v0, 2
	move	$2, k0
	move	$2, k0


@@ -566,7 +574,6 @@ __kvm_mips_return_to_host:
	PTR_LI	k0, 0x2000000F
	PTR_LI	k0, 0x2000000F
	mtc0	k0,  CP0_HWRENA
	mtc0	k0,  CP0_HWRENA



	/* Restore RA, which is the address we will return to */
	/* Restore RA, which is the address we will return to */
	LONG_L  ra, PT_R31(k1)
	LONG_L  ra, PT_R31(k1)
	j       ra
	j       ra
+90 −89
Original line number Original line Diff line number Diff line
@@ -31,38 +31,41 @@
#define VECTORSPACING 0x100	/* for EI/VI mode */
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif
#endif


#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits) },
	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits) },
	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits) },
	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits) },
	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits) },
	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits) },
	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
	{NULL}
	{NULL}
};
};


static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
{
	int i;
	int i;

	for_each_possible_cpu(i) {
	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}
	}

	return 0;
	return 0;
}
}


/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
/*
 * are "runnable" if interrupts are pending
 * XXXKYMA: We are simulatoring a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
{
@@ -103,7 +106,10 @@ static void kvm_mips_init_tlbs(struct kvm *kvm)
{
{
	unsigned long wired;
	unsigned long wired;


	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	mtc0_tlbw_hazard();
@@ -130,7 +136,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}
	}



	return 0;
	return 0;
}
}


@@ -185,8 +190,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	}
	}
}
}


long
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
			unsigned long arg)
{
{
	return -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
}
}
@@ -246,11 +251,10 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
				  npages, kvm->arch.guest_pmap);
				  npages, kvm->arch.guest_pmap);


			/* Now setup the page table */
			/* Now setup the page table */
			for (i = 0; i < npages; i++) {
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
		}
	}
	}
	}
out:
out:
	return;
	return;
}
}
@@ -270,8 +274,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)


struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
{
	extern char mips32_exception[], mips32_exceptionEnd[];
	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
	int err, size, offset;
	int err, size, offset;
	void *gebase;
	void *gebase;
	int i;
	int i;
@@ -290,14 +292,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)


	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);


	/* Allocate space for host mode exception handlers that handle
	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 * guest mode exits
	 */
	 */
	if (cpu_has_veic || cpu_has_vint) {
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
		size = 0x200 + VECTORSPACING * 64;
	} else {
	else
		size = 0x4000;
		size = 0x4000;
	}


	/* Save Linux EBASE */
	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();
	vcpu->arch.host_ebase = (void *)read_c0_ebase();
@@ -345,7 +347,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	local_flush_icache_range((unsigned long)gebase,
	local_flush_icache_range((unsigned long)gebase,
				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));


	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);


	if (!vcpu->arch.kseg0_commpage) {
	if (!vcpu->arch.kseg0_commpage) {
@@ -391,8 +396,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	kvm_arch_vcpu_free(vcpu);
	kvm_arch_vcpu_free(vcpu);
}
}


int
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
					struct kvm_guest_debug *dbg)
{
{
	return -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
@@ -430,8 +434,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	return r;
	return r;
}
}


int
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
			     struct kvm_mips_interrupt *irq)
{
{
	int intr = (int)irq->irq;
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;
	struct kvm_vcpu *dvcpu = NULL;
@@ -458,22 +462,19 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)


	dvcpu->arch.wait = 0;
	dvcpu->arch.wait = 0;


	if (waitqueue_active(&dvcpu->wq)) {
	if (waitqueue_active(&dvcpu->wq))
		wake_up_interruptible(&dvcpu->wq);
		wake_up_interruptible(&dvcpu->wq);
	}


	return 0;
	return 0;
}
}


int
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
				    struct kvm_mp_state *mp_state)
{
{
	return -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
}
}


int
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
				    struct kvm_mp_state *mp_state)
{
{
	return -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
@@ -631,10 +632,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
	}
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
		return put_user(v32, uaddr32);
	} else {
	} else {
		return -EINVAL;
		return -EINVAL;
@@ -727,8 +730,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
	return 0;
	return 0;
}
}


long
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
			 unsigned long arg)
{
{
	struct kvm_vcpu *vcpu = filp->private_data;
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	void __user *argp = (void __user *)arg;
@@ -738,6 +741,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
	case KVM_SET_ONE_REG:
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
		if (ioctl == KVM_SET_ONE_REG)
@@ -772,6 +776,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
	case KVM_INTERRUPT:
	case KVM_INTERRUPT:
		{
		{
			struct kvm_mips_interrupt irq;
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;
				goto out;
@@ -790,9 +795,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
	return r;
	return r;
}
}


/*
/* Get (and clear) the dirty memory log for a memory slot. */
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
{
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot *memslot;
@@ -859,14 +862,14 @@ void kvm_arch_exit(void)
	kvm_mips_callbacks = NULL;
	kvm_mips_callbacks = NULL;
}
}


int
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
				  struct kvm_sregs *sregs)
{
{
	return -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
}
}


int
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
				  struct kvm_sregs *sregs)
{
{
	return -ENOIOCTLCMD;
	return -ENOIOCTLCMD;
}
}
@@ -979,14 +982,11 @@ static void kvm_mips_comparecount_func(unsigned long data)
	kvm_mips_callbacks->queue_timer_int(vcpu);
	kvm_mips_callbacks->queue_timer_int(vcpu);


	vcpu->arch.wait = 0;
	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq)) {
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
		wake_up_interruptible(&vcpu->wq);
}
}
}


/*
/* low level hrtimer wake routine */
 * low level hrtimer wake routine.
 */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu *vcpu;
@@ -1010,8 +1010,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
	return;
	return;
}
}


int
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
				  struct kvm_translation *tr)
{
{
	return 0;
	return 0;
}
}
@@ -1022,8 +1022,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	return kvm_mips_callbacks->vcpu_setup(vcpu);
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}
}


static
static void kvm_mips_set_c0_status(void)
void kvm_mips_set_c0_status(void)
{
{
	uint32_t status = read_c0_status();
	uint32_t status = read_c0_status();


@@ -1053,7 +1052,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	run->ready_for_interrupt_injection = 1;


	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();
	kvm_mips_set_c0_status();


	local_irq_enable();
	local_irq_enable();
@@ -1061,7 +1063,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
			cause, opc, run, vcpu);


	/* Do a privilege check, if in UM most of these exit conditions end up
	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 * causing an exception to be delivered to the Guest Kernel
	 */
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1080,9 +1083,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		++vcpu->stat.int_exits;
		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);
		trace_kvm_exit(vcpu, INT_EXITS);


		if (need_resched()) {
		if (need_resched())
			cond_resched();
			cond_resched();
		}


		ret = RESUME_GUEST;
		ret = RESUME_GUEST;
		break;
		break;
@@ -1094,9 +1096,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
			ret = RESUME_HOST;
		}
		break;
		break;


	case T_TLB_MOD:
	case T_TLB_MOD:
@@ -1106,8 +1107,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		break;
		break;


	case T_TLB_ST_MISS:
	case T_TLB_ST_MISS:
		kvm_debug
		kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		    ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);
			  badvaddr);


@@ -1156,8 +1156,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		break;
		break;


	default:
	default:
		kvm_err
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		kvm_arch_vcpu_dump_regs(vcpu);
@@ -1195,11 +1194,13 @@ int __init kvm_mips_init(void)
	if (ret)
	if (ret)
		return ret;
		return ret;


	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
	/*
	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * to avoid the possibility of double faulting. The issue is that the TLB code
	 * requires TLBs. The TLB handling code is statically linked with
	 * references routines that are part of the the KVM module,
	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
	 * which are only available once the module is loaded.
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the the KVM module, which are only
	 * available once the module is loaded.
	 */
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+11 −10
Original line number Original line Diff line number Diff line
@@ -13,7 +13,8 @@
#define __KVM_MIPS_COMMPAGE_H__
#define __KVM_MIPS_COMMPAGE_H__


struct kvm_mips_commpage {
struct kvm_mips_commpage {
	struct mips_coproc cop0;	/* COP0 state is mapped into Guest kernel via commpage */
	/* COP0 state is mapped into Guest kernel via commpage */
	struct mips_coproc cop0;
};
};


#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
Loading