Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 74ae9987 authored by David S. Miller
Browse files

[SPARC64]: Simplify TSB insert checks.



Don't try to avoid putting non-base page sized entries
into the user TSB.  It actually costs us more to check
this than it helps.

Eventually we'll have a multiple TSB scheme for user
processes.  Once a process starts using larger pages,
we'll allocate and use such a TSB.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3cab0c3e
Loading
Loading
Loading
Loading
+0 −14
Original line number Diff line number Diff line
@@ -55,20 +55,6 @@ tsb_reload:
	brgez,a,pn	%g5, tsb_do_fault
	 TSB_STORE(%g1, %g7)

	/* If it is larger than the base page size, don't
	 * bother putting it into the TSB.
	 */
	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
	and		%g5, %g7, %g2
	sethi		%hi(_PAGE_SZBITS), %g7
	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
	cmp		%g2, %g7
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	bne,a,pn	%xcc, tsb_tlb_reload
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
+6 −9
Original line number Diff line number Diff line
@@ -280,6 +280,8 @@ unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
@@ -308,16 +310,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
	}

	mm = vma->vm_mm;
	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
		struct tsb *tsb;
		unsigned long tag;

	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
			       (mm->context.tsb_nentries - 1UL)];
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));
}
}

void flush_dcache_page(struct page *page)
{