Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dc5008b9 authored by Heiko Carstens, committed by Marcelo Tosatti
Browse files

s390/kvm: remove explicit -EFAULT return code checking on guest access



Let's change to the paradigm that every return code from guest memory
access functions that is not zero translates to -EFAULT and do not
explicitly compare.
Explicitly comparing the return value with -EFAULT has already shown to
be a bit fragile. In addition this is closer to the handling of
copy_to/from_user functions, which imho is in general a good idea.

Also shorten the return code handling in interrupt.c a bit.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 59a1fa2d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -45,7 +45,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
	do {
		rc = get_guest_u64(vcpu, useraddr,
				   &vcpu->arch.sie_block->gcr[reg]);
		if (rc == -EFAULT) {
		if (rc) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
@@ -79,7 +79,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
	reg = reg1;
	do {
		rc = get_guest_u32(vcpu, useraddr, &val);
		if (rc == -EFAULT) {
		if (rc) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
+69 −172
Original line number Diff line number Diff line
@@ -180,7 +180,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
@@ -189,48 +189,24 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc  = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc  = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
@@ -238,24 +214,12 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc  = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
@@ -264,33 +228,15 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc  = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		rc |= put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
		rc |= put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		rc |= put_guest_u64(vcpu, __LC_EXT_PARAMS2,
				    inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
@@ -313,18 +259,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
		  restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		rc  = copy_to_guest(vcpu,
				    offsetof(struct _lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      offsetof(struct _lowcore, restart_psw),
				      sizeof(psw_t));
		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
@@ -333,23 +275,12 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc  = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
		rc |= put_guest_u16(vcpu, __LC_PGM_ILC,
				    table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_MCHK:
@@ -360,22 +291,11 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
						 inti->mchk.mcic);
		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
		rc |= put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_MCK_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -390,39 +310,22 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
						 param0, param1);
		rc  = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
				    inti->io.subchannel_id);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
		rc |= put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
				    inti->io.subchannel_nr);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
		rc |= put_guest_u32(vcpu, __LC_IO_INT_PARM,
				    inti->io.io_int_parm);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
		rc |= put_guest_u32(vcpu, __LC_IO_INT_WORD,
				    inti->io.io_int_word);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_IO_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;
	}
	default:
		BUG();
	}
	if (exception) {
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
@@ -431,24 +334,18 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;
	int rc;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc  = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
+3 −3
Original line number Diff line number Diff line
@@ -108,7 +108,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
	if (rc) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}
@@ -230,7 +230,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
	if (rc)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else {
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
@@ -348,7 +348,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
	if (rc) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}