
Commit e48d53a9 authored by Marc Zyngier

arm64: KVM: Add support for Stage-2 control of memory types and cacheability



Up to ARMv8.3, the combination of Stage-1 and Stage-2 attributes
results in the strongest attribute of the two stages.  This means
that the hypervisor has to perform quite a lot of cache maintenance
just in case the guest has some non-cacheable mappings around.
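
As a rough standalone C sketch (not kernel code, and the enum below
deliberately simplifies the ARM ARM's full combining tables), the
pre-FWB rule looks like this:

/*
 * Pre-ARMv8.4 Stage-1/Stage-2 combining: the resulting attribute is
 * the strongest (least cacheable) of the two stages.
 */
#include <stdio.h>

enum mem_attr {
	ATTR_DEVICE,	/* strongest */
	ATTR_NORMAL_NC,
	ATTR_NORMAL_WT,
	ATTR_NORMAL_WB,	/* weakest */
};

static enum mem_attr combine_s1_s2(enum mem_attr s1, enum mem_attr s2)
{
	return s1 < s2 ? s1 : s2;	/* the stronger attribute wins */
}

int main(void)
{
	/*
	 * KVM maps guest RAM Normal-WB at Stage-2, but a Non-cacheable
	 * guest Stage-1 mapping still downgrades the final attribute,
	 * which is why the host has to clean the dcache to the PoC when
	 * faulting pages in.
	 */
	printf("%d\n", combine_s1_s2(ATTR_NORMAL_NC, ATTR_NORMAL_WB));
	return 0;
}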

ARMv8.4 solves this problem by offering a different mode (FWB) where
Stage-2 has total control over the memory attribute (this is limited
to systems where both I/O and instruction fetches are coherent with
the dcache). This is achieved by having a different set of memory
attributes in the page tables, and a new bit set in HCR_EL2.

On such a system, we can then safely sidestep any form of dcache
management.
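
Concretely, a minimal sketch of the plumbing (assuming the feature is
detected from ID_AA64MMFR2_EL1.FWB, bits [43:40]; cpu_has_stage2_fwb()
and vcpu_hcr_init() are illustrative helpers, not kernel API; the
in-tree equivalents are the ARM64_HAS_STAGE2_FWB capability and the
vcpu_reset_hcr() change in the diff below):

#include <stdbool.h>
#include <stdint.h>

#define ID_AA64MMFR2_FWB_SHIFT	40			/* FWB field, bits [43:40] */
#define HCR_FWB			(UINT64_C(1) << 46)	/* HCR_EL2.FWB */

/* Illustrative: S2FWB is implemented when the ID field is non-zero. */
static bool cpu_has_stage2_fwb(uint64_t id_aa64mmfr2)
{
	return ((id_aa64mmfr2 >> ID_AA64MMFR2_FWB_SHIFT) & 0xf) != 0;
}

/* Illustrative mirror of the vcpu_reset_hcr() hunk below. */
static uint64_t vcpu_hcr_init(uint64_t hcr, bool has_fwb)
{
	return has_fwb ? (hcr | HCR_FWB) : hcr;
}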

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 1e4b044d
arch/arm64/include/asm/cpucaps.h (+2 −1)

@@ -49,7 +49,8 @@
 #define ARM64_HAS_CACHE_DIC			28
 #define ARM64_HW_DBM				29
 #define ARM64_SSBD				30
+#define ARM64_HAS_STAGE2_FWB			31
 
-#define ARM64_NCAPS				31
+#define ARM64_NCAPS				32
 
 #endif /* __ASM_CPUCAPS_H */
arch/arm64/include/asm/kvm_arm.h (+1 −0)

@@ -23,6 +23,7 @@
 #include <asm/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
+#define HCR_FWB		(UL(1) << 46)
 #define HCR_TEA		(UL(1) << 37)
 #define HCR_TERR	(UL(1) << 36)
 #define HCR_TLOR	(UL(1) << 35)
arch/arm64/include/asm/kvm_emulate.h (+2 −0)

@@ -63,6 +63,8 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 		/* trap error record accesses */
 		vcpu->arch.hcr_el2 |= HCR_TERR;
 	}
+	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+		vcpu->arch.hcr_el2 |= HCR_FWB;
 
 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
arch/arm64/include/asm/kvm_mmu.h (+21 −6)

@@ -267,6 +267,15 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
 	void *va = page_address(pfn_to_page(pfn));
 
+	/*
+	 * With FWB, we ensure that the guest always accesses memory using
+	 * cacheable attributes, and we don't have to clean to PoC when
+	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
+	 * PoU is not required either in this case.
+	 */
+	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+		return;
+
 	kvm_flush_dcache_to_poc(va, size);
 }
 
@@ -287,21 +296,27 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
 {
-	struct page *page = pmd_page(pmd);
-	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+		struct page *page = pmd_page(pmd);
+		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+	}
 }
 
 static inline void __kvm_flush_dcache_pud(pud_t pud)
 {
-	struct page *page = pud_page(pud);
-	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
+		struct page *page = pud_page(pud);
+		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+	}
 }
 
 #define kvm_virt_to_phys(x)		__pa_symbol(x)
 
arch/arm64/include/asm/memory.h (+7 −0)

@@ -155,6 +155,13 @@
 #define MT_S2_NORMAL		0xf
 #define MT_S2_DEVICE_nGnRE	0x1
 
+/*
+ * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
+ * Stage-2 enforces Normal-WB and Device-nGnRE
+ */
+#define MT_S2_FWB_NORMAL	6
+#define MT_S2_FWB_DEVICE_nGnRE	1
+
 #ifdef CONFIG_ARM64_4K_PAGES
 #define IOREMAP_MAX_ORDER	(PUD_SHIFT)
 #else
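
The two encodings added above can be read as follows (one
interpretation of the Armv8.4 S2FWB scheme, to be checked against the
ARM ARM):

/*
 * With HCR_EL2.FWB set, Stage-2 MemAttr[2] selects the behaviour:
 *
 *   MemAttr[2] == 1: Stage-2 overrides Stage-1; MemAttr[1:0] == 0b10
 *                    forces Normal Write-Back cacheable memory.
 *                    Hence MT_S2_FWB_NORMAL == 6 == 0b110.
 *   MemAttr[2] == 0: legacy combining applies, so Device-nGnRE at
 *                    Stage-2 still yields Device-nGnRE (or stronger).
 *                    Hence MT_S2_FWB_DEVICE_nGnRE == 1 == 0b001.
 */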