Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit efef127c authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge tag 'kvm-s390-next-4.6-1' of...

Merge tag 'kvm-s390-next-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and features for kvm/next (4.6)

1. also provide the floating point registers via sync regs
2. Separate out instruction vs. data accesses
3. Fix program interrupts in some cases
4. Documentation fixes
5. dirty log improvements for huge guests
parents bce87cce 1763f8d0
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -88,6 +88,8 @@ struct kvm_s390_io_adapter_req {
      perform a gmap translation for the guest address provided in addr,
      pin a userspace page for the translated address and add it to the
      list of mappings
      Note: A new mapping will be created unconditionally; therefore,
            the calling code should avoid making duplicate mappings.

    KVM_S390_IO_ADAPTER_UNMAP
      release a userspace page for the translated address specified in addr
+52 −0
Original line number Diff line number Diff line
@@ -84,3 +84,55 @@ Returns: -EBUSY in case 1 or more vcpus are already activated (only in write
	    -EFAULT if the given address is not accessible from kernel space
	    -ENOMEM if not enough memory is available to process the ioctl
	    0 in case of success

3. GROUP: KVM_S390_VM_TOD
Architectures: s390

3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH

Allows user space to set/get the TOD clock extension (u8).

Parameters: address of a buffer in user space to store the data (u8) to
Returns:    -EFAULT if the given address is not accessible from kernel space
	    -EINVAL if setting the TOD clock extension to != 0 is not supported

3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW

Allows user space to set/get bits 0-63 of the TOD clock register as defined in
the POP (u64).

Parameters: address of a buffer in user space to store the data (u64) to
Returns:    -EFAULT if the given address is not accessible from kernel space

4. GROUP: KVM_S390_VM_CRYPTO
Architectures: s390

4.1. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_AES_KW (w/o)

Allows user space to enable aes key wrapping, including generating a new
wrapping key.

Parameters: none
Returns:    0

4.2. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_DEA_KW (w/o)

Allows user space to enable dea key wrapping, including generating a new
wrapping key.

Parameters: none
Returns:    0

4.3. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_AES_KW (w/o)

Allows user space to disable aes key wrapping, clearing the wrapping key.

Parameters: none
Returns:    0

4.4. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_DEA_KW (w/o)

Allows user space to disable dea key wrapping, clearing the wrapping key.

Parameters: none
Returns:    0
+1 −7
Original line number Diff line number Diff line
@@ -229,17 +229,11 @@ struct kvm_s390_itdb {
	__u8	data[256];
} __packed;

struct kvm_s390_vregs {
	__vector128 vrs[32];
	__u8	reserved200[512];	/* for future vector expansion */
} __packed;

struct sie_page {
	struct kvm_s390_sie_block sie_block;
	__u8 reserved200[1024];		/* 0x0200 */
	struct kvm_s390_itdb itdb;	/* 0x0600 */
	__u8 reserved700[1280];		/* 0x0700 */
	struct kvm_s390_vregs vregs;	/* 0x0c00 */
	__u8 reserved700[2304];		/* 0x0700 */
} __packed;

struct kvm_vcpu_stat {
+6 −2
Original line number Diff line number Diff line
@@ -154,6 +154,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_PFAULT (1UL << 5)
#define KVM_SYNC_VRS    (1UL << 6)
#define KVM_SYNC_RICCB  (1UL << 7)
#define KVM_SYNC_FPRS   (1UL << 8)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
	__u64 prefix;	/* prefix register */
@@ -168,9 +169,12 @@ struct kvm_sync_regs {
	__u64 pft;	/* pfault token [PFAULT] */
	__u64 pfs;	/* pfault select [PFAULT] */
	__u64 pfc;	/* pfault compare [PFAULT] */
	__u64 vrs[32][2];	/* vector registers */
	union {
		__u64 vrs[32][2];	/* vector registers (KVM_SYNC_VRS) */
		__u64 fprs[16];		/* fp registers (KVM_SYNC_FPRS) */
	};
	__u8  reserved[512];	/* for future vector expansion */
	__u32 fpc;	/* only valid with vector registers */
	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
	__u8 padding[52];	/* riccb needs to be 64byte aligned */
	__u8 riccb[64];		/* runtime instrumentation controls block */
};
+30 −27
Original line number Diff line number Diff line
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
}

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
			  int write)
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
@@ -454,7 +454,7 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
		}
	}

	if (ale.fo == 1 && write)
	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
@@ -477,25 +477,28 @@ enum {
};

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 ar_t ar, int write)
			 ar_t ar, enum gacc_mode mode)
{
	int rc;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec_bits;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
	tec_bits->as = psw_bits(*psw).as;
	tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
	tec_bits->as = psw.as;

	if (!psw_bits(*psw).t) {
	if (!psw.t) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
	if (mode == GACC_IFETCH)
		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;

	switch (psw.as) {
	case PSW_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
@@ -506,7 +509,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, write);
		rc = ar_translation(vcpu, asce, ar, mode);
		switch (rc) {
		case PGM_ALEN_TRANSLATION:
		case PGM_ALE_SEQUENCE:
@@ -538,7 +541,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @write: indicates if access is a write access
 * @mode: indicates the access mode to be used
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
@@ -554,7 +557,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     int write)
				     enum gacc_mode mode)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
@@ -699,7 +702,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (write && dat_protection)
	if (mode == GACC_STORE && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
@@ -728,7 +731,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,

static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, int write)
			    const union asce asce, enum gacc_mode mode)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -740,13 +743,13 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		tec_bits->addr = ga >> PAGE_SHIFT;
		if (write && lap_enabled && is_low_address(ga)) {
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
			pgm->code = PGM_PROTECTION;
			return pgm->code;
		}
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, asce, write);
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			if (rc < 0)
				return rc;
			if (rc == PGM_PROTECTION)
@@ -768,7 +771,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
}

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, int write)
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
@@ -780,7 +783,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,

	if (!len)
		return 0;
	rc = get_vcpu_asce(vcpu, &asce, ar, write);
	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
@@ -792,11 +795,11 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (write)
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
@@ -812,7 +815,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
}

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, int write)
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned long _len, gpa;
	int rc = 0;
@@ -820,7 +823,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (write)
		if (mode)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
@@ -841,7 +844,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
			    unsigned long *gpa, int write)
			    unsigned long *gpa, enum gacc_mode mode)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -851,19 +854,19 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	rc = get_vcpu_asce(vcpu, &asce, ar, write);
	rc = get_vcpu_asce(vcpu, &asce, ar, mode);
	tec->addr = gva >> PAGE_SHIFT;
	if (rc)
		return rc;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (write) {
		if (mode == GACC_STORE) {
			rc = pgm->code = PGM_PROTECTION;
			return rc;
		}
	}

	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, write);
		rc = guest_translate(vcpu, gva, gpa, asce, mode);
		if (rc > 0) {
			if (rc == PGM_PROTECTION)
				tec->b61 = 1;
@@ -883,7 +886,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 * check_gva_range - test a range of guest virtual addresses for accessibility
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, int is_write)
		    unsigned long length, enum gacc_mode mode)
{
	unsigned long gpa;
	unsigned long currlen;
@@ -892,7 +895,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
	ipte_lock(vcpu);
	while (length > 0 && !rc) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
		gva += currlen;
		length -= currlen;
	}
Loading