
Commit 3daa48d1 authored by Vineet Gupta

ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID



ASID allocation changes/1

This patch does 2 things:

(1) get_new_mmu_context() NOW moves mm->ASID to a new value ONLY if it
    was from a prev allocation cycle/generation OR if mm had no ASID
    allocated (vs. before, which would unconditionally move to a new ASID).

    Callers desiring an unconditional ASID update, e.g. local_flush_tlb_mm()
    (for the parent's address space invalidation at fork), need to first
    force the parent to an unallocated ASID.

(2) get_new_mmu_context() always sets the MMU PID reg with the
    unchanged/new ASID value.

The gains are:
- consolidation of all ASID alloc logic into get_new_mmu_context()
- no code duplication in switch_mm() for PID reg setting
- ability to fold activate_mm() into switch_mm() in a future change
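
To make the two points above concrete, here is a minimal user-space sketch of
the new allocation rule. It only mirrors the shape of the diff below: the
NUM_ASID/NO_ASID values are taken from the old switch_mm() comment, rollover
and the asid_mm_map bookkeeping are deliberately elided, and printf() merely
stands in for write_aux_reg(), so treat it as illustrative, not as the kernel
code itself.

#include <stdio.h>

#define NUM_ASID 256
#define NO_ASID  NUM_ASID          /* "unallocated": compares > any valid ASID (0-255) */

static int asid_cache;             /* last ASID handed out in the current cycle */

struct mm_ctx { int asid; };       /* simplified stand-in for mm->context */

static void get_new_mmu_context(struct mm_ctx *mm)
{
	/* (1) ASID still valid for the current cycle: keep it */
	if (mm->asid <= asid_cache)
		goto set_hw;

	/* otherwise hand out the next ASID (rollover handling elided) */
	mm->asid = ++asid_cache;

set_hw:
	/* (2) always program the "PID register" with the unchanged/new ASID */
	printf("PID reg <- ASID %d\n", mm->asid);
}

int main(void)
{
	struct mm_ctx mm = { .asid = NO_ASID };   /* fresh mm, e.g. after fork */

	get_new_mmu_context(&mm);   /* allocates a new ASID */
	get_new_mmu_context(&mm);   /* reuses it, only reprograms the h/w reg */
	return 0;
}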

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 5bd87adf
+18 −27
@@ -69,8 +69,8 @@ extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
 extern int asid_cache;
 
 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
@@ -79,6 +79,17 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 
 	local_irq_save(flags);
 
+	/*
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 *	 generation, e.g. local_flush_tlb_mm() for forking parent,
+	 *	 first need to destroy the context, setting it to invalid
+	 *	 value.
+	 */
+	if (mm->context.asid <= asid_cache)
+		goto set_hw;
+
 	/*
 	 * Relinquish the currently owned ASID (if any).
 	 * Doing unconditionally saves a cmp-n-branch; for already unused
@@ -99,9 +110,9 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	 * task with ASID from prev allocation cycle (before ASID roll-over).
 	 *
 	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
+	 * won't we use it's stale TLB entries too. Actually the algorithm takes
 	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
+	 * cycle, when scheduled will refresh it's ASID
 	 * The stealing scenario described here will only happen if that task
 	 * didn't get a chance to refresh it's ASID - implying stale entries
 	 * won't exist.
@@ -114,7 +125,8 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -141,28 +153,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif
 
-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 *  -task never had an ASID (fresh after fork)
-	 *  -it's ASID was stolen - past an ASID roll-over.
-	 *  -There's a third obscure scenario (if this task is running for the
-	 *   first time afer an ASID rollover), where despite having a valid
-	 *   ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below:  NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
-
+	get_new_mmu_context(next);
 }
 
 static inline void destroy_context(struct mm_struct *mm)
+7 −6
@@ -258,13 +258,14 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 		return;
 
 	/*
-	 * Workaround for Android weirdism:
-	 * A binder VMA could end up in a task such that vma->mm != tsk->mm
-	 * old code would cause h/w - s/w ASID to get out of sync
+	 * - Move to a new ASID, but only if the mm is still wired in
+	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
+	 *    causing h/w - s/w ASID to get out of sync)
+	 * - Also get_new_mmu_context() new implementation allocates a new
+	 *   ASID only if it is not allocated already - so unallocate first
 	 */
-	if (current->mm != mm)
-		destroy_context(mm);
-	else
+	destroy_context(mm);
+
+	if (current->mm == mm)
 		get_new_mmu_context(mm);
 }
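
For completeness, the caller-side contract from point (1) of the commit
message, which the local_flush_tlb_mm() hunk above implements, can be
sketched on top of the same simplified types used in the earlier sketch.
destroy_context() is assumed here to do nothing more than mark the ASID
unallocated, per the "setting it to invalid value" note in the first hunk;
force_new_asid() is an illustrative helper, not part of the patch.

static void destroy_context(struct mm_ctx *mm)
{
	mm->asid = NO_ASID;          /* invalid value: fails the <= asid_cache check */
}

/* Force a brand new ASID regardless of allocation cycle, as the fork path
 * needs to do for the parent's address space. */
static void force_new_asid(struct mm_ctx *mm)
{
	destroy_context(mm);         /* unallocate first ...                  */
	get_new_mmu_context(mm);     /* ... so this is guaranteed to allocate */
}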