
Commit 7a6d8c2c authored by Suzuki K Poulose, committed by Isaac J. Manjarres

arm64: Delay enabling hardware DBM feature



We enable the hardware DBM bit on capable CPUs very early in the
boot via __cpu_setup. This doesn't give us the flexibility to
optionally disable the feature later, as clearing the bit is
somewhat costly because the TLB can cache the settings. Instead,
we delay enabling the feature until the CPU is brought up into
the kernel, and use the feature capability mechanism to handle it.
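
As a sketch of the mechanism described above (illustrative only, not
part of the patch): the capability framework runs an enable hook on
every CPU that is brought up, so a control bit such as TCR_EL1.HD can
be set per CPU instead of globally in __cpu_setup, and never needs to
be cleared again. The helper this_cpu_wants_feature() below is a
hypothetical stand-in for a per-CPU feature check.

	/*
	 * Illustrative per-CPU enable hook, mirroring the pattern the
	 * patch adopts. this_cpu_wants_feature() is hypothetical.
	 */
	static int my_cap_enable(void *entry)
	{
		const struct arm64_cpu_capabilities *cap = entry;

		if (this_cpu_wants_feature(cap)) {
			/* Set the control bit on this CPU only. */
			write_sysreg(read_sysreg(tcr_el1) | TCR_HD, tcr_el1);
			isb();
		}

		return 0;
	}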

Hardware DBM is a non-conflicting feature, i.e. the kernel can
safely run with a mix of CPUs where some use the feature and
others don't. So it is safe for a late CPU to have this
capability and enable it, even if the active CPUs don't.

To get this handled properly by the infrastructure, we
unconditionally set the capability and only enable it on CPUs
which really have the feature. Also, we print the feature
detection from the "matches" callback to make sure we don't
mislead the user when none of the CPUs can use the feature.
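
A minimal sketch of this "always match, print once" arrangement
(hypothetical names; the real version is has_hw_dbm in the diff below):

	static bool has_my_feature(const struct arm64_cpu_capabilities *cap,
				   int scope)
	{
		static bool detected;

		/* Print only when the first capable CPU shows up. */
		if (!detected && this_cpu_wants_feature(cap)) {
			detected = true;
			pr_info("detected: my feature\n");
		}

		/* Always report a match so the capability is set. */
		return true;
	}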

Change-Id: Icb987abe0a4d3eeae718c88e53fc9a8f3cce7f80
Cc: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Dave Martin <dave.martin@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Git-commit: 05abb595bbaccc9c4290bee62086d0eeea9f0f32
Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


[isaacm@codeaurora.org: remove code that is not needed, since
 this tree has no way of expressing the list of CPUs that would
 be impacted by erratum 1024718. Add calls to preempt_disable()
 and preempt_enable() to conform to the proper use of
 has_cpuid_feature()]
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent e7f6706b
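
On the preempt_disable()/preempt_enable() calls mentioned in the note
above: has_cpuid_feature() with SCOPE_LOCAL_CPU reads the ID registers
of whichever CPU the caller happens to be running on, so the caller
must not migrate mid-check. A sketch of the wrapper pattern (it
appears in the patch as cpu_can_use_dbm):

	static bool this_cpu_has_cap(const struct arm64_cpu_capabilities *cap)
	{
		bool ret;

		/* Pin to one CPU while its ID registers are sampled. */
		preempt_disable();
		ret = has_cpuid_feature(cap, SCOPE_LOCAL_CPU);
		preempt_enable();

		return ret;
	}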
arch/arm64/include/asm/cpucaps.h  +2 −1

@@ -43,7 +43,8 @@
 #define ARM64_UNMAP_KERNEL_AT_EL0		23
 #define ARM64_HARDEN_BRANCH_PREDICTOR		24
 #define ARM64_HARDEN_BP_POST_GUEST_EXIT		25
+#define ARM64_HW_DBM				26
 
-#define ARM64_NCAPS				26
+#define ARM64_NCAPS				27
 
 #endif /* __ASM_CPUCAPS_H */
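
For reference, these constants are bit indices into the kernel's
capability bitmap, which is why ARM64_NCAPS has to grow with the new
entry. An illustrative query (not part of the patch); note that with
this patch ARM64_HW_DBM is set unconditionally whenever
CONFIG_ARM64_HW_AFDBM is enabled, so the per-CPU TCR_EL1.HD bit, not
this capability, records whether a given CPU actually uses DBM:

	/* Illustrative only: testing the new capability bit elsewhere. */
	if (cpus_have_cap(ARM64_HW_DBM))
		pr_debug("HW_DBM capability handling is in effect\n");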
arch/arm64/kernel/cpufeature.c  +81 −0

@@ -880,6 +880,68 @@ static int __init parse_kpti(char *str)
 __setup("kpti=", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+static inline void __cpu_enable_hw_dbm(void)
+{
+	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
+
+	write_sysreg(tcr, tcr_el1);
+	isb();
+}
+
+static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
+{
+	bool has_cpu_feature;
+
+	preempt_disable();
+	has_cpu_feature = has_cpuid_feature(cap, SCOPE_LOCAL_CPU);
+	preempt_enable();
+
+	return has_cpu_feature;
+}
+
+static int cpu_enable_hw_dbm(void *entry)
+{
+	const struct arm64_cpu_capabilities *cap =
+		(const struct arm64_cpu_capabilities *) entry;
+
+	if (cpu_can_use_dbm(cap))
+		__cpu_enable_hw_dbm();
+
+	return 0;
+}
+
+static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
+		       int __unused)
+{
+	static bool detected = false;
+	/*
+	 * DBM is a non-conflicting feature. i.e, the kernel can safely
+	 * run a mix of CPUs with and without the feature. So, we
+	 * unconditionally enable the capability to allow any late CPU
+	 * to use the feature. We only enable the control bits on the
+	 * CPU, if it actually supports.
+	 *
+	 * We have to make sure we print the "feature" detection only
+	 * when at least one CPU actually uses it. So check if this CPU
+	 * can actually use it and print the message exactly once.
+	 *
+	 * This is safe as all CPUs (including secondary CPUs - due to the
+	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
+	 * goes through the "matches" check exactly once. Also if a CPU
+	 * matches the criteria, it is guaranteed that the CPU will turn
+	 * the DBM on, as the capability is unconditionally enabled.
+	 */
+	if (!detected && cpu_can_use_dbm(cap)) {
+		detected = true;
+		pr_info("detected: Hardware dirty bit management\n");
+	}
+
+	return true;
+}
+
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -992,6 +1054,25 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
 		.min_field_value = 1,
 	},
 #endif
+#ifdef CONFIG_ARM64_HW_AFDBM
+	{
+		/*
+		 * Since we turn this on always, we don't want the user to
+		 * think that the feature is available when it may not be.
+		 * So hide the description.
+		 *
+		 * .desc = "Hardware pagetable Dirty Bit Management",
+		 *
+		 */
+		.capability = ARM64_HW_DBM,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+		.min_field_value = 2,
+		.matches = has_hw_dbm,
+		.enable = cpu_enable_hw_dbm,
+	},
+#endif
 	{},
 };
arch/arm64/mm/proc.S  +6 −12

@@ -477,21 +477,15 @@ ENTRY(__cpu_setup)
 	bfi	x10, x9, #32, #3
 #ifdef CONFIG_ARM64_HW_AFDBM
 	/*
-	 * Hardware update of the Access and Dirty bits.
+	 * Enable hardware update of the Access Flags bit.
+	 * Hardware dirty bit management is enabled later,
+	 * via capabilities.
 	 */
 	mrs	x9, ID_AA64MMFR1_EL1
 	and	x9, x9, #0xf
-	cbz	x9, 2f
-	cmp	x9, #2
-	b.lt	1f
-#ifdef CONFIG_ARM64_ERRATUM_1024718
-	/* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
-	cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
-	cbnz	x1, 1f
-#endif
-	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
-1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
-2:
+	cbz	x9, 1f
+	orr	x10, x10, #TCR_HA		// hardware Access flag update
+1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
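
To make the split in __cpu_setup concrete: the HADBS field of
ID_AA64MMFR1_EL1 reports 0 for no hardware updates, 1 for hardware
Access flag updates only, and 2 for Access flag plus dirty state
updates. __cpu_setup now only needs HADBS != 0 before setting TCR_HA,
while the capability entry's min_field_value = 2 gates TCR_HD. An
illustrative C equivalent of the two checks (the helper name is
hypothetical):

	static bool cpu_has_hw_af_dbm(bool want_dbm)
	{
		u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
		unsigned int hadbs =
			cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_HADBS_SHIFT);

		/* 1 => hardware AF only, 2 => hardware AF + DBM */
		return hadbs >= (want_dbm ? 2 : 1);
	}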