
Commit 7b8274e9 authored by Jack Steiner, committed by Linus Torvalds

sgi-gru: support multiple pagesizes in GRU



Add multiple pagesize support to the GRU driver.
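
The core of the change is a per-context bitmask of page sizes (gts->ts_sizeavail) that starts with the base page size and accumulates larger sizes as faults on them are seen. A minimal compilable sketch of that mask arithmetic follows; the GRU_PAGESIZE()/GRU_SIZEAVAIL() definitions here are stand-ins for the driver's macros, and the encodings (4K -> hardware pagesize code 0, 2M -> code 2) are assumptions chosen to match the PAGE_SHIFT and shift-21 sizes visible in the hunks below.

	#include <stdio.h>

	/* Stand-ins for the driver's macros (assumed encodings: 4K -> 0, 2M -> 2) */
	#define GRU_PAGESIZE(sh)	((sh) == 12 ? 0 : 2)
	#define GRU_SIZEAVAIL(sh)	(1UL << GRU_PAGESIZE(sh))

	int main(void)
	{
		/* a context starts out supporting only the base page size */
		unsigned long ts_sizeavail = GRU_SIZEAVAIL(12);
		int pageshift = 21;			/* a fault on a 2MB page */

		/* the same test gru_try_dropin() performs below */
		if (!(ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
			ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
			printf("new pagesize, CCH update needed: mask 0x%lx\n",
			       ts_sizeavail);
		}
		return 0;
	}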

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 27ca8a7b
drivers/misc/sgi-gru/grufault.c  +15 −0
@@ -360,6 +360,13 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	if (ret == -2)
 		goto failupm;
 
+	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+		if (atomic || !gru_update_cch(gts, 0)) {
+			gts->ts_force_cch_reload = 1;
+			goto failupm;
+		}
+	}
 	gru_cb_set_istatus_active(cb);
 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
 			  GRU_PAGESIZE(pageshift));
@@ -535,6 +542,14 @@ int gru_handle_user_call_os(unsigned long cb)
 		gts->ts_force_unload = 1;
 	}
 
+	/*
+	 * CCH may contain stale data if ts_force_cch_reload is set.
+	 */
+	if (gts->ts_gru && gts->ts_force_cch_reload) {
+		gru_update_cch(gts, 0);
+		gts->ts_force_cch_reload = 0;
+	}
+
 	ret = -EAGAIN;
 	cbrnum = thread_cbr_number(gts, ucbnum);
 	if (gts->ts_force_unload) {
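
Taken together, the two hunks above implement a deferred CCH reload: when a new page size shows up during an atomic fault, the context cannot be stopped to update the CCH, so the handler only flags ts_force_cch_reload and fails the upm; the flag is consumed on the next entry through gru_handle_user_call_os(). A distilled, self-contained model of that handshake (the *_model names are hypothetical, and try_update_cch() is a stub standing in for gru_update_cch()):

	#include <stdbool.h>

	struct gts_model {
		unsigned long sizeavail;	/* models gts->ts_sizeavail */
		bool force_cch_reload;		/* models gts->ts_force_cch_reload */
		bool loaded;			/* models gts->ts_gru != NULL */
	};

	/* stub for gru_update_cch(); reports whether the CCH was updated */
	static bool try_update_cch(struct gts_model *g)
	{
		return g->loaded;
	}

	/* fault path, possibly atomic: mirrors the first hunk */
	static bool try_dropin(struct gts_model *g, unsigned long sizebit, bool atomic)
	{
		if (!(g->sizeavail & sizebit)) {
			g->sizeavail |= sizebit;
			if (atomic || !try_update_cch(g)) {
				g->force_cch_reload = true;	/* defer the update */
				return false;			/* i.e. goto failupm */
			}
		}
		return true;		/* safe to drop the TLB entry in */
	}

	/* process-context path: mirrors the second hunk */
	static void handle_call_os(struct gts_model *g)
	{
		/* CCH may contain stale data if a reload was deferred */
		if (g->loaded && g->force_cch_reload) {
			try_update_cch(g);
			g->force_cch_reload = false;
		}
	}

	int main(void)
	{
		struct gts_model g = { .sizeavail = 1UL << 0, .loaded = true };

		if (!try_dropin(&g, 1UL << 2, true))	/* atomic fault, new size */
			handle_call_os(&g);		/* later call_os applies it */
		return 0;
	}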
drivers/misc/sgi-gru/gruhandles.c  +4 −30
@@ -72,42 +72,16 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
 	return status;
 }
 
-#if defined CONFIG_IA64
-static void cch_allocate_set_asids(
-		  struct gru_context_configuration_handle *cch, int asidval)
+int cch_allocate(struct gru_context_configuration_handle *cch,
+		int asidval, int sizeavail, unsigned long cbrmap,
+		unsigned long dsrmap)
 {
 	int i;
 
 	for (i = 0; i < 8; i++) {
 		cch->asid[i] = (asidval++);
-#if 0
-		/* ZZZ hugepages not supported yet */
-		if (i == RGN_HPAGE)
-			cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
-		else
-#endif
-			cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
+		cch->sizeavail[i] = sizeavail;
 	}
-}
-#elif defined CONFIG_X86_64
-static void cch_allocate_set_asids(
-		  struct gru_context_configuration_handle *cch, int asidval)
-{
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		cch->asid[i] = asidval++;
-		cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
-			GRU_SIZEAVAIL(21);
-	}
-}
-#endif
-
-int cch_allocate(struct gru_context_configuration_handle *cch,
-			       int asidval, unsigned long cbrmap,
-			       unsigned long dsrmap)
-{
-	cch_allocate_set_asids(cch, asidval);
 	cch->dsr_allocation_map = dsrmap;
 	cch->cbr_allocation_map = cbrmap;
 	cch->opc = CCHOP_ALLOCATE;
drivers/misc/sgi-gru/gruhandles.h  +1 −1
@@ -496,7 +496,7 @@ enum gru_cbr_state {
 #define GRUMAXINVAL		1024UL
 
 int cch_allocate(struct gru_context_configuration_handle *cch,
-       int asidval, unsigned long cbrmap, unsigned long dsrmap);
+       int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
 
 int cch_start(struct gru_context_configuration_handle *cch);
 int cch_interrupt(struct gru_context_configuration_handle *cch);
drivers/misc/sgi-gru/grukservices.c  +1 −1
@@ -672,7 +672,7 @@ int gru_kservices_init(struct gru_state *gru)
 	cch->tlb_int_enable = 0;
 	cch->tfm_done_bit_enable = 0;
 	cch->unmap_enable = 1;
-	err = cch_allocate(cch, 0, cbr_map, dsr_map);
+	err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
 	if (err) {
 		gru_dbg(grudev,
 			"Unable to allocate kernel CCH: gid %d, err %d\n",
drivers/misc/sgi-gru/grumain.c  +11 −6
@@ -326,6 +326,7 @@ static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
 	gts->ts_vma = vma;
 	gts->ts_tlb_int_select = -1;
 	gts->ts_gms = gru_register_mmu_notifier();
+	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
 	if (!gts->ts_gms)
 		goto err;
 
@@ -552,7 +553,8 @@ static void gru_load_context(struct gru_thread_state *gts)
 		cch->tlb_int_select = gts->ts_tlb_int_select;
 	}
 	cch->tfm_done_bit_enable = 0;
-	err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
+	err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
+				gts->ts_dsr_map);
 	if (err) {
 		gru_dbg(grudev,
 			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -573,11 +575,12 @@
 /*
  * Update fields in an active CCH:
  * 	- retarget interrupts on local blade
+ * 	- update sizeavail mask
  * 	- force a delayed context unload by clearing the CCH asids. This
  * 	  forces TLB misses for new GRU instructions. The context is unloaded
  * 	  when the next TLB miss occurs.
  */
-static int gru_update_cch(struct gru_thread_state *gts, int int_select)
+int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 {
 	struct gru_context_configuration_handle *cch;
 	struct gru_state *gru = gts->ts_gru;
@@ -591,9 +594,11 @@ static int gru_update_cch(struct gru_thread_state *gts, int int_select)
 			goto exit;
 		if (cch_interrupt(cch))
 			BUG();
-		if (int_select >= 0) {
-			gts->ts_tlb_int_select = int_select;
-			cch->tlb_int_select = int_select;
+		if (!force_unload) {
+			for (i = 0; i < 8; i++)
+				cch->sizeavail[i] = gts->ts_sizeavail;
+			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+			cch->tlb_int_select = gru_cpu_fault_map_id();
 		} else {
 			for (i = 0; i < 8; i++)
 				cch->asid[i] = 0;
@@ -625,7 +630,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 
 	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
 		gru_cpu_fault_map_id());
-	return gru_update_cch(gts, gru_cpu_fault_map_id());
+	return gru_update_cch(gts, 0);
 }


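The reworked gru_update_cch() above takes a force_unload flag instead of an interrupt target: a plain update (force_unload == 0) pushes the accumulated sizeavail mask into all eight CCH slots and retargets the TLB interrupt to the current cpu's fault map id, while force_unload zeroes the asids so the next TLB miss unloads the context. A small model of just that branch structure (cch_model, GRU_NUM_REGIONS, and cpu_fault_map_id() are hypothetical stand-ins for the hardware handle, its slot count, and gru_cpu_fault_map_id()):

	#define GRU_NUM_REGIONS 8	/* assumed: the 8 asid/sizeavail slots per CCH */

	struct cch_model {
		unsigned long sizeavail[GRU_NUM_REGIONS];
		int asid[GRU_NUM_REGIONS];
		int tlb_int_select;
	};

	/* hypothetical stand-in for gru_cpu_fault_map_id() */
	static int cpu_fault_map_id(void)
	{
		return 0;
	}

	static void update_cch_model(struct cch_model *cch, unsigned long ts_sizeavail,
				     int force_unload)
	{
		int i;

		if (!force_unload) {
			/* refresh page sizes and retarget the TLB interrupt */
			for (i = 0; i < GRU_NUM_REGIONS; i++)
				cch->sizeavail[i] = ts_sizeavail;
			cch->tlb_int_select = cpu_fault_map_id();
		} else {
			/* clearing the asids forces unload on the next TLB miss */
			for (i = 0; i < GRU_NUM_REGIONS; i++)
				cch->asid[i] = 0;
		}
	}

	int main(void)
	{
		struct cch_model cch = { {0}, {0}, -1 };

		update_cch_model(&cch, 0x5UL, 0);	/* plain update */
		update_cch_model(&cch, 0x5UL, 1);	/* force unload */
		return 0;
	}

This is also why gru_retarget_intr() now simply calls gru_update_cch(gts, 0): the interrupt target is read from gru_cpu_fault_map_id() inside the update itself rather than passed in.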