Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5fe26b7a authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

x86/mm/cpa: Simplify the code after making cpa->vaddr invariant



Since cpa->vaddr is invariant, this means we can remove all
workarounds that deal with it changing.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.366619025@infradead.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 98bfc9b0
Loading
Loading
Loading
Loading
+2 −5
Original line number Diff line number Diff line
@@ -124,7 +124,6 @@ static int pageattr_test(void)
	unsigned int level;
	int i, k;
	int err;
	unsigned long test_addr;

	if (print)
		printk(KERN_INFO "CPA self-test:\n");
@@ -181,8 +180,7 @@ static int pageattr_test(void)

		switch (i % 3) {
		case 0:
			test_addr = addr[i];
			err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
			err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
			break;

		case 1:
@@ -226,8 +224,7 @@ static int pageattr_test(void)
			failed++;
			continue;
		}
		test_addr = addr[i];
		err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
		err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
		if (err < 0) {
			printk(KERN_ERR "CPA reverting failed: %d\n", err);
			failed++;
+4 −9
Original line number Diff line number Diff line
@@ -1908,15 +1908,13 @@ EXPORT_SYMBOL_GPL(set_memory_array_wt);
int _set_memory_wc(unsigned long addr, int numpages)
{
	int ret;
	unsigned long addr_copy = addr;

	ret = change_page_attr_set(&addr, numpages,
				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				   0);
	if (!ret) {
		ret = change_page_attr_set_clr(&addr_copy, numpages,
					       cachemode2pgprot(
						_PAGE_CACHE_MODE_WC),
		ret = change_page_attr_set_clr(&addr, numpages,
					       cachemode2pgprot(_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, 0, NULL);
	}
@@ -2064,7 +2062,6 @@ int set_memory_global(unsigned long addr, int numpages)
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
	struct cpa_data cpa;
	unsigned long start;
	int ret;

	/* Nothing to do if memory encryption is not active */
@@ -2075,8 +2072,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
		addr &= PAGE_MASK;

	start = addr;

	memset(&cpa, 0, sizeof(cpa));
	cpa.vaddr = &addr;
	cpa.numpages = numpages;
@@ -2091,7 +2086,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
	/*
	 * Before changing the encryption attribute, we need to flush caches.
	 */
	cpa_flush_range(start, numpages, 1);
	cpa_flush_range(addr, numpages, 1);

	ret = __change_page_attr_set_clr(&cpa, 1);

@@ -2102,7 +2097,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
	 * in case TLB flushing gets optimized in the cpa_flush_range()
	 * path use the same logic as above.
	 */
	cpa_flush_range(start, numpages, 0);
	cpa_flush_range(addr, numpages, 0);

	return ret;
}