Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2b8d7af authored by Martin Schwidefsky, committed by Martin Schwidefsky
Browse files

[S390] add support for nonquiescing sske



Improve performance of the sske operation by using the nonquiescing
variant if the affected page has no mappings established. On machines
with no support for the new sske variant the mask bit will be ignored.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 92f842ea
Loading
Loading
Loading
Loading
+6 −2
Original line number Diff line number Diff line
@@ -108,8 +108,12 @@ typedef pte_t *pgtable_t;
#define __pgprot(x)     ((pgprot_t) { (x) } )

static inline void
page_set_storage_key(unsigned long addr, unsigned int skey)
page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

+4 −4
Original line number Diff line number Diff line
@@ -590,7 +590,7 @@ static inline void rcp_unlock(pte_t *ptep)
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page, int mapped);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
@@ -800,7 +800,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
		page_clear_dirty(page, 1);
	rcp_unlock(ptep);
	return dirty;
}
@@ -975,9 +975,9 @@ static inline int page_test_dirty(struct page *page)
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
static inline void page_clear_dirty(struct page *page, int mapped)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
}

/*
+2 −1
Original line number Diff line number Diff line
@@ -208,7 +208,8 @@ static noinline __init void init_kernel_storage_key(void)
	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
}

static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
+2 −1
Original line number Diff line number Diff line
@@ -627,7 +627,8 @@ setup_memory(void)
		add_active_range(0, start_chunk, end_chunk);
		pfn = max(start_chunk, start_pfn);
		for (; pfn < end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
			page_set_storage_key(PFN_PHYS(pfn),
					     PAGE_DEFAULT_KEY, 0);
	}

	psw_set_key(PAGE_DEFAULT_KEY);
+1 −1
Original line number Diff line number Diff line
@@ -108,7 +108,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
/*
 * Default no-op for architectures without hardware dirty tracking.
 * The @mapped argument mirrors the s390 implementation's signature;
 * do { } while (0) keeps the macro usable as a single statement.
 */
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
Loading