
Commit 72c58395 authored by Catalin Marinas

arm64: gicv3: Allow GICv3 compilation with older binutils



GICv3 introduces new system registers accessible with the full msr/mrs
syntax (e.g. mrs x0, Sop0_op1_CRm_CRn_op2). However, only recent
binutils understand the new syntax. This patch introduces msr_s/mrs_s
assembly macros which generate the equivalent instructions above and
converts the existing GICv3 code (both drivers/irqchip/ and
arch/arm64/kernel/).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Olof Johansson <olof@lixom.net>
Tested-by: Olof Johansson <olof@lixom.net>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Jason Cooper <jason@lakedaemon.net>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
parent ecb3c2bb
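
For context, the change boils down to the following before/after contrast on the C side (a minimal sketch that mirrors the drivers/irqchip change further down; it assumes the kernel's __stringify() helper, and the function names read_iar_old/read_iar_new are illustrative only and do not appear in the patch):

/* Before: ICC_IAR1_EL1 expands to the textual name S3_0_C12_C12_0, so the
 * plain mrs mnemonic only assembles with binutils that understand the
 * generic system-register syntax. */
static u64 read_iar_old(void)
{
	u64 irqstat;

	asm volatile("mrs %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

/* After: ICC_IAR1_EL1 expands to the numeric sys_reg(3, 0, 12, 12, 0)
 * encoding and the mrs_s assembler macro from <asm/sysreg.h> emits the
 * instruction word directly with .inst, so any binutils version works. */
static u64 read_iar_new(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}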
arch/arm64/include/asm/sysreg.h (new file)  +60 −0
/*
 * Macros for accessing system registers with older binutils.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H

#define sys_reg(op0, op1, crn, crm, op2) \
	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))

#ifdef __ASSEMBLY__

	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	.equ	__reg_num_x\num, \num
	.endr
	.equ	__reg_num_xzr, 31

	.macro	mrs_s, rt, sreg
	.inst	0xd5300000|(\sreg)|(__reg_num_\rt)
	.endm

	.macro	msr_s, sreg, rt
	.inst	0xd5100000|(\sreg)|(__reg_num_\rt)
	.endm

#else

asm(
"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
"	.equ	__reg_num_x\\num, \\num\n"
"	.endr\n"
"	.equ	__reg_num_xzr, 31\n"
"\n"
"	.macro	mrs_s, rt, sreg\n"
"	.inst	0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
"	.endm\n"
"\n"
"	.macro	msr_s, sreg, rt\n"
"	.inst	0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
"	.endm\n"
);

#endif

#endif	/* __ASM_SYSREG_H */
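
A worked example of what these macros emit, using ICC_SRE_EL2 = sys_reg(3, 4, 12, 9, 5) from the head.S hunk below (a stand-alone user-space sketch; the expected words follow the standard A64 MRS/MSR system-register encoding):

/* Stand-alone sketch: recompute the words that mrs_s/msr_s emit for
 * ICC_SRE_EL2 (op0=3, op1=4, CRn=12, CRm=9, op2=5). */
#include <assert.h>
#include <stdint.h>

#define sys_reg(op0, op1, crn, crm, op2) \
	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))

int main(void)
{
	uint32_t sreg = sys_reg(3, 4, 12, 9, 5);	/* 0xcc9a0 */

	/* mrs_s x0, ICC_SRE_EL2 -> .inst 0xd5300000|sreg|reg_num(x0) */
	assert((0xd5300000u | sreg | 0u) == 0xd53cc9a0u);
	/* msr_s ICC_SRE_EL2, x0 -> .inst 0xd5100000|sreg|reg_num(x0) */
	assert((0xd5100000u | sreg | 0u) == 0xd51cc9a0u);
	return 0;
}

These are the same instruction words a newer assembler produces for mrs x0, s3_4_c12_c9_5 and msr s3_4_c12_c9_5, x0; the .irp/.equ block above exists only to translate register names such as x0 or xzr into the numeric Rt field of the encoding.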
arch/arm64/kernel/head.S  +3 −3
@@ -297,12 +297,12 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
	cmp	x0, #1
	b.ne	3f

-	mrs	x0, ICC_SRE_EL2
+	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
-	msr	ICC_SRE_EL2, x0
+	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
-	msr	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults
+	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif
drivers/irqchip/irq-gic-v3.c  +8 −8
@@ -108,39 +108,39 @@ static u64 gic_read_iar(void)
{
	u64 irqstat;

	asm volatile("mrs %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

static void gic_write_pmr(u64 val)
{
	asm volatile("msr " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void gic_write_ctlr(u64 val)
{
	asm volatile("msr " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

static void gic_write_grpen1(u64 val)
{
	asm volatile("msr " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

static void gic_write_sgi1r(u64 val)
{
	asm volatile("msr " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
@@ -150,7 +150,7 @@ static void gic_enable_sre(void)
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
include/linux/irqchip/arm-gic-v3.h  +22 −20
@@ -18,6 +18,8 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
#define __LINUX_IRQCHIP_ARM_GIC_V3_H

+#include <asm/sysreg.h>
+
/*
 * Distributor registers. We assume we're running non-secure, with ARE
 * being set. Secure-only and non-ARE registers are not described.
@@ -125,17 +127,17 @@
#define ICH_VMCR_PMR_SHIFT		24
#define ICH_VMCR_PMR_MASK		(0xffUL << ICH_VMCR_PMR_SHIFT)

-#define ICC_EOIR1_EL1			S3_0_C12_C12_1
-#define ICC_IAR1_EL1			S3_0_C12_C12_0
-#define ICC_SGI1R_EL1			S3_0_C12_C11_5
-#define ICC_PMR_EL1			S3_0_C4_C6_0
-#define ICC_CTLR_EL1			S3_0_C12_C12_4
-#define ICC_SRE_EL1			S3_0_C12_C12_5
-#define ICC_GRPEN1_EL1			S3_0_C12_C12_7
+#define ICC_EOIR1_EL1			sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1			sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1			sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1			sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1			sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1			sys_reg(3, 0, 12, 12, 7)

#define ICC_IAR1_EL1_SPURIOUS		0x3ff

-#define ICC_SRE_EL2			S3_4_C12_C9_5
+#define ICC_SRE_EL2			sys_reg(3, 4, 12, 9, 5)

#define ICC_SRE_EL2_SRE			(1 << 0)
#define ICC_SRE_EL2_ENABLE		(1 << 3)
@@ -143,16 +145,16 @@
/*
 * System register definitions
 */
-#define ICH_VSEIR_EL2			S3_4_C12_C9_4
-#define ICH_HCR_EL2			S3_4_C12_C11_0
-#define ICH_VTR_EL2			S3_4_C12_C11_1
-#define ICH_MISR_EL2			S3_4_C12_C11_2
-#define ICH_EISR_EL2			S3_4_C12_C11_3
-#define ICH_ELSR_EL2			S3_4_C12_C11_5
-#define ICH_VMCR_EL2			S3_4_C12_C11_7
+#define ICH_VSEIR_EL2			sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2			sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2			sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2			sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2			sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2			sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2			sys_reg(3, 4, 12, 11, 7)

-#define __LR0_EL2(x)			S3_4_C12_C12_ ## x
-#define __LR8_EL2(x)			S3_4_C12_C13_ ## x
+#define __LR0_EL2(x)			sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x)			sys_reg(3, 4, 12, 13, x)

#define ICH_LR0_EL2			__LR0_EL2(0)
#define ICH_LR1_EL2			__LR0_EL2(1)
@@ -171,13 +173,13 @@
#define ICH_LR14_EL2			__LR8_EL2(6)
#define ICH_LR15_EL2			__LR8_EL2(7)

-#define __AP0Rx_EL2(x)			S3_4_C12_C8_ ## x
+#define __AP0Rx_EL2(x)			sys_reg(3, 4, 12, 8, x)
#define ICH_AP0R0_EL2			__AP0Rx_EL2(0)
#define ICH_AP0R1_EL2			__AP0Rx_EL2(1)
#define ICH_AP0R2_EL2			__AP0Rx_EL2(2)
#define ICH_AP0R3_EL2			__AP0Rx_EL2(3)

-#define __AP1Rx_EL2(x)			S3_4_C12_C9_ ## x
+#define __AP1Rx_EL2(x)			sys_reg(3, 4, 12, 9, x)
#define ICH_AP1R0_EL2			__AP1Rx_EL2(0)
#define ICH_AP1R1_EL2			__AP1Rx_EL2(1)
#define ICH_AP1R2_EL2			__AP1Rx_EL2(2)
@@ -189,7 +191,7 @@

static inline void gic_write_eoir(u64 irq)
{
	asm volatile("msr " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
	isb();
}
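
Putting the two halves together for C users such as gic_write_eoir() above: ICC_EOIR1_EL1 is now the numeric sys_reg(3, 0, 12, 12, 1) expression, __stringify() turns that expression into the operand text, and the msr_s assembler macro (made available to C translation units by the asm() block in <asm/sysreg.h>) folds it into a single .inst word. A stand-alone sketch of that expansion, where the local __stringify/sys_reg copies merely mirror <linux/stringify.h> and the new <asm/sysreg.h>, and x0 stands in for whichever register the compiler allocates for %0:

/* Stand-alone sketch of the macro expansion behind gic_write_eoir(). */
#include <stdio.h>

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define sys_reg(op0, op1, crn, crm, op2) \
	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))

#define ICC_EOIR1_EL1		sys_reg(3, 0, 12, 12, 1)

int main(void)
{
	/* The operand text the msr_s assembler macro receives... */
	printf("msr_s %s, x0\n", __stringify(ICC_EOIR1_EL1));
	/* ...and the instruction word it emits once the assembler folds the
	 * expression (0x8cc20) into .inst 0xd5100000|sreg|reg_num(x0). */
	printf(".inst 0x%08x\n", 0xd5100000u | ICC_EOIR1_EL1 | 0u);
	return 0;
}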