
Commit 04c6b3e2 authored by Max Filippov

xtensa: optimize local_flush_tlb_kernel_range

Don't flush the whole TLB if only a small kernel range is requested.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 8585b316
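
The diffs below (the page does not show the file names) touch three places: the xtensa tlbflush header, where flush_tlb_kernel_range() changes from a flush-everything static inline into a real declaration and the !SMP macro is redirected to the new local helper; the SMP code, which gains an IPI wrapper so every CPU runs the local flush; and the TLB code, which adds local_flush_tlb_kernel_range() itself. The new helper walks the range page by page only when the range lies between TASK_SIZE and PAGE_OFFSET and is shorter than _TLB_ENTRIES pages; anything else still falls back to a full flush. The following is a minimal, compilable sketch of just that decision, not part of the commit, with made-up stand-in constants (the real PAGE_SHIFT, TASK_SIZE, PAGE_OFFSET and _TLB_ENTRIES are xtensa configuration values, not the numbers used here):

/* Illustrative sketch only, not kernel code. */
#include <stdbool.h>

#define EX_PAGE_SHIFT   12              /* hypothetical: 4 KiB pages */
#define EX_TLB_ENTRIES  16              /* hypothetical stand-in for _TLB_ENTRIES */
#define EX_TASK_SIZE    0x40000000UL    /* hypothetical stand-in for TASK_SIZE */
#define EX_PAGE_OFFSET  0xd0000000UL    /* hypothetical stand-in for PAGE_OFFSET */

/* True when the patched local_flush_tlb_kernel_range() would invalidate
 * page by page; false when it would fall back to local_flush_tlb_all(). */
static bool flushes_per_page(unsigned long start, unsigned long end)
{
	return end > start && start >= EX_TASK_SIZE && end <= EX_PAGE_OFFSET &&
	       end - start < (EX_TLB_ENTRIES << EX_PAGE_SHIFT);
}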
+4 −7
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
 		unsigned long page);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_SMP
 
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
 void flush_tlb_page(struct vm_area_struct *, unsigned long);
 void flush_tlb_range(struct vm_area_struct *, unsigned long,
 		unsigned long);
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-		unsigned long end)
-{
-	flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #else /* !CONFIG_SMP */
 
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_page(vma, page)	   local_flush_tlb_page(vma, page)
 #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
 								 end)
-#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+									end)
 
 #endif /* CONFIG_SMP */
+15 −0
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
 }
 
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
 /* Cache flush functions */
 
 static void ipi_flush_cache_all(void *arg)
+15 −0
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+		start &= PAGE_MASK;
+		while (start < end) {
+			invalidate_itlb_mapping(start);
+			invalidate_dtlb_mapping(start);
+			start += PAGE_SIZE;
+		}
+	} else {
+		local_flush_tlb_all();
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
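
For a feel of the loop in the last hunk: start is rounded down to a page boundary, then one ITLB and one DTLB invalidation are issued per page until end is reached. A small stand-alone sketch, not part of the commit, that mirrors the loop and merely counts the pages touched (hypothetical 4 KiB page size, not xtensa-specific code):

#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

/* Same rounding and stride as the patch, counting iterations instead of
 * invalidating TLB entries. */
static unsigned long pages_touched(unsigned long start, unsigned long end)
{
	unsigned long n = 0;

	start &= EX_PAGE_MASK;
	while (start < end) {
		n++;
		start += EX_PAGE_SIZE;
	}
	return n;
}

int main(void)
{
	/* 0x3000 bytes starting mid-page span four page frames, so the
	 * patched helper would issue four ITLB and four DTLB invalidations. */
	printf("%lu\n", pages_touched(0xc0001800UL, 0xc0004800UL));
	return 0;
}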