Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 44b6cc81 authored by Martin Schwidefsky
Browse files

s390/mm,kvm: flush gmap address space with IDTE



The __tlb_flush_mm() helper uses a global flush if the mm struct
has a gmap structure attached to it. Replace the global flush with
two individual flushes by means of the IDTE instruction if only a
single gmap is attached to the mm.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d5dcafee
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@ typedef struct {
	struct list_head pgtable_list;
	spinlock_t gmap_lock;
	struct list_head gmap_list;
	unsigned long gmap_asce;
	unsigned long asce;
	unsigned long asce_limit;
	unsigned long vdso_base;
+1 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	mm->context.gmap_asce = 0;
	mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
	mm->context.alloc_pgste = page_table_allocate_pgste;
+17 −23
Original line number Diff line number Diff line
@@ -60,18 +60,25 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
	preempt_enable();
}

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
 * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
 * when more than one asce (e.g. gmap) ran on this mm.
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(asce);
	else
		__tlb_flush_global();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		__tlb_flush_full(mm);
	}
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	atomic_dec(&mm->context.flush_count);
@@ -92,7 +99,7 @@ static inline void __tlb_flush_kernel(void)
/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	__tlb_flush_local();
}
@@ -103,19 +110,6 @@ static inline void __tlb_flush_kernel(void)
}
#endif

static inline void __tlb_flush_mm(struct mm_struct * mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, mm->context.asce);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
	if (mm->context.flush_mm) {
+15 −0
Original line number Diff line number Diff line
@@ -94,6 +94,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
	/* Remove gmap from the pre-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */