Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 092670cd authored by Carsten Otte, committed by Martin Schwidefsky
Browse files

[S390] Use gmap translation for accessing guest memory



This patch removes kvm-s390 internal assumption of a linear mapping
of guest address space to user space. Previously, guest memory was
translated to user addresses using a fixed offset (gmsor). The new
code uses gmap_fault to resolve guest addresses.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 598841ca
Loading
Loading
Loading
Loading
+1 −3
Original line number Original line Diff line number Diff line
@@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
	__u32	scaol;			/* 0x0064 */
	__u32	scaol;			/* 0x0064 */
	__u8	reserved68[4];		/* 0x0068 */
	__u8	reserved68[4];		/* 0x0068 */
	__u32	todpr;			/* 0x006c */
	__u32	todpr;			/* 0x006c */
	__u8	reserved70[16];		/* 0x0070 */
	__u8	reserved70[32];		/* 0x0070 */
	__u64	gmsor;			/* 0x0080 */
	__u64	gmslm;			/* 0x0088 */
	psw_t	gpsw;			/* 0x0090 */
	psw_t	gpsw;			/* 0x0090 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg15;			/* 0x00a8 */
	__u64	gg15;			/* 0x00a8 */
+175 −68
Original line number Original line Diff line number Diff line
/*
/*
 * gaccess.h -  access guest memory
 * access.h -  access guest memory
 *
 *
 * Copyright IBM Corp. 2008,2009
 * Copyright IBM Corp. 2008,2009
 *
 *
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
					       unsigned long guestaddr)
{
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);


	if (guestaddr < 2 * PAGE_SIZE)
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;
		guestaddr -= prefix;


	if (guestaddr > memsize)
	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}
}


static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,


static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       unsigned long guestdest,
				       const void *from, unsigned long n)
				       void *from, unsigned long n)
{
{
	int rc;
	int rc;
	unsigned long i;
	unsigned long i;
	const u8 *data = from;
	u8 *data = from;


	for (i = 0; i < n; i++) {
	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
	return 0;
	return 0;
}
}


/*
 * Copy n bytes from the kernel buffer 'from' to guest address 'guestdest',
 * resolving guest addresses to user-space addresses via gmap_fault()
 * (the guest address-space map introduced by this commit).
 *
 * A gmap translation is only guaranteed contiguous within one segment
 * table entry (PMD_SIZE), so the copy is split at segment boundaries
 * and each segment is translated separately.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, or the error
 * code propagated from gmap_fault() on a failed translation.
 * NOTE(review): assumes the caller passes n > 0; with n == 0 the
 * "simple case" path still behaves correctly (zero-length copy).
 */
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject address-range wrap-around */
	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		/* copy_to_user returns the number of bytes NOT copied */
		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	/* bytes remaining up to the next segment boundary */
	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

/*
 * Copy to a guest absolute address: unlike copy_to_guest(), no prefix
 * (low-core) relocation is applied — the gmap translation inside
 * __copy_to_guest_fast() does all address resolution.
 */
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				const void *from, unsigned long n)
				void *from, unsigned long n)
{
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);


	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;
		goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;
		guestdest -= prefix;


	if (guestdest + n > memsize)
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
slowpath:
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
}
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
	return 0;
	return 0;
}
}


static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
					 unsigned long guestsrc,
					 unsigned long n)
{
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	int r;
	unsigned long origin  = vcpu->arch.sie_block->gmsor;
	void __user *uptr;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
	unsigned long size;


	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
	if (guestsrc + n < guestsrc)
		goto slowpath;
		return -EFAULT;


	if ((guestsrc < prefix) && (guestsrc + n > prefix))
	/* simple case: all within one segment table entry? */
		goto slowpath;
	if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);


	if ((guestsrc < prefix + 2 * PAGE_SIZE)
		if (IS_ERR((void __force *) uptr))
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
			return PTR_ERR((void __force *) uptr);
		goto slowpath;


	if (guestsrc < 2 * PAGE_SIZE)
		r = copy_from_user(to, uptr, n);
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;


	if (guestsrc + n > memsize)
		if (r)
		return -EFAULT;
			r = -EFAULT;


	if (guestsrc + n < guestsrc)
		goto out;
		return -EFAULT;
	}


	guestsrc += origin;
	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);


	return copy_from_user(to, (void __user *) guestsrc, n);
	if (IS_ERR((void __force *) uptr))
slowpath:
		return PTR_ERR((void __force *) uptr);
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	}
	to += size;
	n -= size;
	guestsrc += size;


static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
	/* copy full segments */
					 unsigned long guestdest,
	while (n >= PMD_SIZE) {
					 const void *from, unsigned long n)
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
{
	unsigned long origin  = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);


	if (guestdest + n > memsize)
		if (IS_ERR((void __force *) uptr))
		return -EFAULT;
			return PTR_ERR((void __force *) uptr);


	if (guestdest + n < guestdest)
		r = copy_from_user(to, uptr, PMD_SIZE);
		return -EFAULT;

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);


	guestdest += origin;
		r = copy_from_user(to, uptr, n);


	return copy_to_user((void __user *) guestdest, from, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
}


static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long guestsrc,
					   unsigned long n)
					   unsigned long n)
{
{
	unsigned long origin  = vcpu->arch.sie_block->gmsor;
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
}


	if (guestsrc + n > memsize)
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
		return -EFAULT;
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;


	if (guestsrc + n < guestsrc)
	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		return -EFAULT;
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;


	guestsrc += origin;
	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;


	return copy_from_user(to, (void __user *) guestsrc, n);
	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
}
#endif
#endif
+14 −10
Original line number Original line Diff line number Diff line
@@ -165,26 +165,30 @@ static int handle_validity(struct kvm_vcpu *vcpu)
	int rc;
	int rc;


	vcpu->stat.exit_validity++;
	vcpu->stat.exit_validity++;
	if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
	if (viwhy == 0x37) {
		<= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
		rc = fault_in_pages_writeable((char __user *)
				    vcpu->arch.gmap);
			 vcpu->arch.sie_block->gmsor +
		if (IS_ERR_VALUE(vmaddr)) {
			 vcpu->arch.sie_block->prefix,
			rc = -EOPNOTSUPP;
			 2*PAGE_SIZE);
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
			 PAGE_SIZE);
		if (rc) {
		if (rc) {
			/* user will receive sigsegv, exit to user */
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			rc = -EOPNOTSUPP;
			goto out;
			goto out;
		}
		}
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
				    vcpu->arch.gmap);
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			rc = -EOPNOTSUPP;
			goto out;
			goto out;
		}
		}
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
		rc = fault_in_pages_writeable((char __user *) vmaddr,
				    vcpu->arch.gmap);
			 PAGE_SIZE);
		if (IS_ERR_VALUE(vmaddr)) {
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			rc = -EOPNOTSUPP;
			goto out;
			goto out;
		}
		}
+2 −2
Original line number Original line Diff line number Diff line
@@ -549,7 +549,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	return rc;
	return rc;
}
}


static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
		       unsigned long n, int prefix)
{
{
	if (prefix)
	if (prefix)
@@ -566,7 +566,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
 */
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
{
	const unsigned char archmode = 1;
	unsigned char archmode = 1;
	int prefix;
	int prefix;


	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
+0 −23
Original line number Original line Diff line number Diff line
@@ -58,31 +58,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);


/*
 * Guest memory size computed from the SIE block's linear-mapping
 * registers: gmslm (limit) minus gmsor (origin, the fixed user-space
 * offset per the commit message), excluding the virtio descriptor
 * area (VIRTIODESCSPACE) reserved at the top.
 * Removed by this commit: gmap_fault() makes the linear-mapping
 * assumption — and therefore this helper — obsolete.
 */
static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->gmslm
		- vcpu->arch.sie_block->gmsor
		- VIRTIODESCSPACE + 1ul;
}

static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
{
{
	int idx;
	struct kvm_memory_slot *mem;
	struct kvm_memslots *memslots;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	memslots = kvm_memslots(vcpu->kvm);

	mem = &memslots->memslots[0];

	vcpu->arch.sie_block->gmsor = mem->userspace_addr;
	vcpu->arch.sie_block->gmslm =
		mem->userspace_addr +
		(mem->npages << PAGE_SHIFT) +
		VIRTIODESCSPACE - 1ul;

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
}


/* implemented in priv.c */
/* implemented in priv.c */
Loading