Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 320437af authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull s390 fixes from Martin Schwidefsky:
 "Several last minute bug fixes.

  Two of them are on the larger side for rc7: the dasd format patch for
  older storage devices, and the store-clock-fast patch, where we have
  been too optimistic with an optimization"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/time: correct use of store clock fast
  s390/vmlogrdr: fix array access in vmlogrdr_open()
  s390/compat,signal: fix return value of copy_siginfo_(to|from)_user32()
  s390/dasd: check for availability of prefix command during format
  s390/mm,kvm: fix software dirty bits vs. kvm for old machines
parents 90338325 8c071b0f
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
	if (!MACHINE_HAS_ESOP &&
	    (pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
+14 −14
Original line number Diff line number Diff line
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)

typedef unsigned long long cycles_t;

/*
 * Read the s390 TOD (time-of-day) clock as a 64-bit value.
 *
 * On machines with z9-109 features the STCKF (store clock fast)
 * instruction is used; it is emitted via .insn with its opcode
 * (0xb27c) so the code also assembles with toolchains that do not
 * know the stckf mnemonic.  Older machines fall back to the
 * serializing STCK instruction.  Both store into @clk and clobber
 * the condition code.
 */
static inline unsigned long long get_tod_clock(void)
{
	unsigned long long clk;

#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
#else
	asm volatile("stck %0" : "=Q" (clk) : : "cc");
#endif
	return clk;
}

/*
 * Store the extended-format TOD clock via STCKE into the buffer at
 * @clk.  STCKE writes a 16-byte value, so the caller must supply a
 * buffer of at least 16 bytes; the condition code is clobbered.
 */
static inline void get_tod_clock_ext(char *clk)
{
	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}

static inline unsigned long long get_tod_clock_xt(void)
static inline unsigned long long get_tod_clock(void)
{
	unsigned char clk[16];
	get_tod_clock_ext(clk);
	return *((unsigned long long *)&clk[1]);
}

/*
 * Fast, non-serializing read of the TOD clock.
 *
 * Uses STCKF (store clock fast) where the machine supports it
 * (z9-109 features); unlike STCK it does not serialize the CPU, so
 * it is cheaper but gives weaker ordering guarantees relative to
 * surrounding stores.  On older machines there is no fast variant,
 * so it falls back to get_tod_clock().
 */
static inline unsigned long long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	unsigned long long clk;

	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
	return clk;
#else
	return get_tod_clock();
#endif
}

static inline cycles_t get_cycles(void)
{
	return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
 */
static inline unsigned long long get_tod_clock_monotonic(void)
{
	return get_tod_clock_xt() - sched_clock_base_cc;
	return get_tod_clock() - sched_clock_base_cc;
}

/**
+2 −2
Original line number Diff line number Diff line
@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
			break;
		}
	}
	return err;
	return err ? -EFAULT : 0;
}

int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,7 +148,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
			break;
		}
	}
	return err;
	return err ? -EFAULT : 0;
}

static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
+1 −1
Original line number Diff line number Diff line
@@ -867,7 +867,7 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
			int exception)
{
	active->id.stck = get_tod_clock();
	active->id.stck = get_tod_clock_fast();
	active->id.fields.cpuid = smp_processor_id();
	active->caller = __builtin_return_address(0);
	active->id.fields.exception = exception;
+3 −3
Original line number Diff line number Diff line
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
		goto no_timer;
	}

	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
	}

	if ((vcpu->arch.sie_block->ckc <
		get_tod_clock() + vcpu->arch.sie_block->epoch))
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
Loading