
Commit a49072bb authored by Tony Luck

Pull kvm-patches into release branch

parents fc494d6c 31a6b11f
+49 −0
@@ -97,6 +97,7 @@

#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"
@@ -112,6 +113,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

@@ -1182,6 +1184,49 @@ ia64_wait_for_slaves(int monarch, const char *type)
	return;
}

/* mca_insert_tr
 *
 *  Reload this CPU's recorded dynamic TR entries, temporarily
 *  switching the region ID whenever it differs from the saved one.
 *  iord: 0x1: itr, 0x2: dtr
 */
static void mca_insert_tr(u64 iord)
{
	int i;
	u64 old_rr;
	struct ia64_tr_entry *p;
	unsigned long psr;
	int cpu = smp_processor_id();

	psr = ia64_clear_ic();
	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
		p = &__per_cpu_idtrs[cpu][iord-1][i];
		if (p->pte & 0x1) {
			old_rr = ia64_get_rr(p->ifa);
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, p->rr);
				ia64_srlz_d();
			}
			ia64_ptr(iord, p->ifa, p->itir >> 2);
			ia64_srlz_i();
			if (iord & 0x1) {
				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (iord & 0x2) {
				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, old_rr);
				ia64_srlz_d();
			}
		}
	}
	ia64_set_psr(psr);
}

/*
 * ia64_mca_handler
 *
@@ -1271,6 +1316,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		monarch_cpu = -1;
#endif
	}
	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /* Reload dynamic itrs */
		mca_insert_tr(0x2); /* Reload dynamic dtrs */
	}
	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);
+5 −0
@@ -219,8 +219,13 @@ ia64_reload_tr:
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r16
	GET_THIS_PADDR(r2, ia64_mca_tr_reload)	// per-CPU flag, checked later
	mov r18 = 1				// by ia64_mca_handler()
	;;
	srlz.d
	;;
	st8 [r2] =r18
	;;

done_tlb_purge_and_reload:

+82 −0
@@ -209,6 +209,19 @@ send_IPI_allbutself (int op)
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask:	The set of cpus to run on.  Must not include the current cpu.
 * @func:	The function to run. This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to the function.
 * @wait:	If true, wait (atomically) until function
 *		has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	spin_lock(&call_lock);
	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(IPI_CALL_FUNC);
	else
		send_IPI_mask(mask, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
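
A minimal usage sketch for the new interface. The callback and CPU numbers below are invented for illustration; only smp_call_function_mask() itself comes from this patch, and per the comment above it must not be called with interrupts disabled:

/* Hedged sketch: drain_queue()/drain_remote_queues() are hypothetical. */
static void drain_queue(void *info)
{
	/* runs on each selected CPU; must be fast and non-blocking */
}

static int drain_remote_queues(void)
{
	cpumask_t mask = CPU_MASK_NONE;

	cpu_set(1, mask);
	cpu_set(2, mask);
	/* wait == 1: return only after drain_queue() has finished
	 * on every CPU in the mask */
	return smp_call_function_mask(mask, drain_queue, NULL, 1);
}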

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
+196 −0
@@ -26,6 +26,8 @@
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/tlb.h>

static struct {
	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -39,6 +41,10 @@ struct ia64_ctx ia64_ctx = {
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots on current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by the kernel */

struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -190,6 +196,9 @@ ia64_tlb_init (void)
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	unsigned long tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
@@ -206,4 +215,191 @@ ia64_tlb_init (void)
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke left overs from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary failed with status=%ld\n",
			status);
		per_cpu(ia64_tr_num, cpu) = 8; /* fall back to a safe minimum */
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		printk(KERN_DEBUG "TR register number exceeds "
			"IA64_TR_ALLOC_MAX; IA64_TR_ALLOC_MAX should be extended\n");
	}
}
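
The checks above amount to a three-way clamp on the per-CPU TR count: the smaller of the PAL-reported itr and dtr limits, capped at IA64_TR_ALLOC_MAX. An equivalent sketch, for clarity only (min_t() is the standard kernel helper, not what the patch itself uses):

/* Equivalent of the clamp above; vm_info_1 is the PAL data read earlier. */
u8 tr_num = min_t(u8, vm_info_1.pal_vm_info_1_s.max_itr_entry + 1,
		      vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1);
per_cpu(ia64_tr_num, cpu) = min_t(u8, tr_num, IA64_TR_ALLOC_MAX);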

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
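
The range test is the usual closed-interval overlap check: two ranges are disjoint only if one starts past the other's end. A standalone worked example with invented addresses (userspace-style sketch, not part of the patch):

/* Illustration of the overlap test; all values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tr_ifa = 0xa000000000000000ULL;	/* existing 16MB TR */
	uint64_t tr_end = tr_ifa + (1UL << 24) - 1;
	uint64_t va     = 0xa000000000800000ULL;	/* proposed 1MB mapping */
	uint64_t va_end = va + (1UL << 20) - 1;

	/* same test as is_tr_overlap(), minus the region-ID check */
	int overlap = !(va > tr_end || tr_ifa > va_end);
	printf("overlap=%d\n", overlap);		/* prints overlap=1 */
	return 0;
}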

/*
 * ia64_itr_entry: allocate a TR slot and insert a translation.
 * Runs in virtual mode.
 *
 * target_mask: 0x1: itr, 0x2: dtr, 0x3: both.
 * va:       virtual address to map.
 * pte:      pte entry to insert.
 * log_size: log2 of the size of the range to be covered.
 *
 * Return value: < 0: error number.
 *               >= 0: slot number allocated for the TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	r = -EINVAL;
	/* Check for overlap with existing TR entries */
	if (target_mask & 0x1) {
		p = &__per_cpu_idtrs[cpu][0][0];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapping entry "
						"inserted for TR register!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = &__per_cpu_idtrs[cpu][1][0];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapping entry "
						"inserted for TR register!\n");
					goto out;
				}
		}
	}

	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
				!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = &__per_cpu_idtrs[cpu][0][i];
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = &__per_cpu_idtrs[cpu][1][i];
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
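
A hedged caller sketch. The wrapper, address, and pte preparation are illustrative only; the exact call sites (the KVM module is the intended user) are not part of this commit:

/* Illustrative only: map one 16MB granule with paired itr/dtr entries.
 * base_va and pte are assumed to be prepared by the caller;
 * 24 == log2(16MB). */
static int pin_granule(u64 base_va, u64 pte)
{
	int slot;

	preempt_disable();		/* required by ia64_itr_entry() */
	slot = ia64_itr_entry(0x3, base_va, pte, 24);
	preempt_enable();

	return slot;			/* < 0 on error, else slot number */
}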

/*
 * ia64_ptr_entry: purge a TR entry and free its slot.
 *
 * target_mask: 0x1: purge itr, 0x2: purge dtr, 0x3: purge both.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = &__per_cpu_idtrs[cpu][0][slot];
		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir>>2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = &__per_cpu_idtrs[cpu][1][slot];
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir>>2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
				(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);
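
The two interfaces are meant to be used as a pair; continuing the hypothetical sketch above:

/* Illustrative companion to pin_granule(): release the slot it returned. */
static void unpin_granule(int slot)
{
	preempt_disable();		/* required by ia64_ptr_entry() */
	ia64_ptr_entry(0x3, slot);	/* purge both itr and dtr */
	preempt_enable();
}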
+3 −0
@@ -31,6 +31,9 @@
#define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
#define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */

#define IA64_TR_ALLOC_BASE	2	/* itr & dtr: base of dynamic TR resource */
#define IA64_TR_ALLOC_MAX	32	/* max number for dynamic use */

/* Processor status register bits: */
#define IA64_PSR_BE_BIT		1
#define IA64_PSR_UP_BIT		2