[PATCH 7/8] ARM: perf: Allow the use of the PMUv3 driver on 32bit ARM

Marc Zyngier maz at kernel.org
Wed Feb 8 04:51:10 PST 2023


On Thu, 26 Jan 2023 20:44:43 +0000,
Zaid Al-Bassam <zalbassam at google.com> wrote:
> 
> From: Marc Zyngier <marc.zyngier at arm.com>
> 
> The only thing stopping the PMUv3 driver from compiling on 32bit
> is the lack of defined system registers names. This is easily
> solved by providing the sysreg accessors and updating the Kconfig entry.
> 
> Signed-off-by: Marc Zyngier <marc.zyngier at arm.com>
> Co-developed-by: Zaid Al-Bassam <zalbassam at google.com>
> Signed-off-by: Zaid Al-Bassam <zalbassam at google.com>
> ---
>  arch/arm/include/asm/arm_pmuv3.h | 238 +++++++++++++++++++++++++++++++
>  drivers/perf/Kconfig             |   5 +-
>  2 files changed, 240 insertions(+), 3 deletions(-)
>  create mode 100644 arch/arm/include/asm/arm_pmuv3.h
> 
> diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
> new file mode 100644
> index 000000000000..816873c74eda
> --- /dev/null
> +++ b/arch/arm/include/asm/arm_pmuv3.h
> @@ -0,0 +1,238 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2012 ARM Ltd.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ASM_PMUV3_H
> +#define __ASM_PMUV3_H
> +
> +#include <asm/cp15.h>
> +#include <asm/cputype.h>
> +
> +#define PMCCNTR			__ACCESS_CP15_64(0, c9)
> +
> +#define PMCR			__ACCESS_CP15(c9,  0, c12, 0)
> +#define PMCNTENSET		__ACCESS_CP15(c9,  0, c12, 1)
> +#define PMCNTENCLR		__ACCESS_CP15(c9,  0, c12, 2)
> +#define PMOVSR			__ACCESS_CP15(c9,  0, c12, 3)
> +#define PMSELR			__ACCESS_CP15(c9,  0, c12, 5)
> +#define PMCEID0			__ACCESS_CP15(c9,  0, c12, 6)
> +#define PMCEID1			__ACCESS_CP15(c9,  0, c12, 7)
> +#define PMXEVTYPER		__ACCESS_CP15(c9,  0, c13, 1)
> +#define PMXEVCNTR		__ACCESS_CP15(c9,  0, c13, 2)
> +#define PMUSERENR		__ACCESS_CP15(c9,  0, c14, 0)
> +#define PMINTENSET		__ACCESS_CP15(c9,  0, c14, 1)
> +#define PMINTENCLR		__ACCESS_CP15(c9,  0, c14, 2)
> +#define PMMIR			__ACCESS_CP15(c9,  0, c14, 6)
> +#define PMCCFILTR		__ACCESS_CP15(c14, 0, c15, 7)
> +#define PMEVCNTR0(n)	__ACCESS_CP15(c14, 0, c8, n)
> +#define PMEVCNTR1(n)	__ACCESS_CP15(c14, 0, c9, n)
> +#define PMEVCNTR2(n)	__ACCESS_CP15(c14, 0, c10, n)
> +#define PMEVCNTR3(n)	__ACCESS_CP15(c14, 0, c11, n)
> +#define PMEVTYPER0(n)	__ACCESS_CP15(c14, 0, c12, n)
> +#define PMEVTYPER1(n)	__ACCESS_CP15(c14, 0, c13, n)
> +#define PMEVTYPER2(n)	__ACCESS_CP15(c14, 0, c14, n)
> +#define PMEVTYPER3(n)	__ACCESS_CP15(c14, 0, c15, n)
> +
> +#define PMEV_EVENTS_PER_REG		8
> +#define PMEV_REGISTER(n)		(n / PMEV_EVENTS_PER_REG)
> +#define PMEV_EVENT(n)			(n % PMEV_EVENTS_PER_REG)
> +
> +#define PMEV_CASE(reg, ev, case_macro)	\
> +	case ev:							\
> +		case_macro(reg, ev);			\
> +		break
> +
> +#define PMEV_EV_SWITCH(reg, ev, case_macro)	\
> +	do {									\
> +		switch (ev) {						\
> +		PMEV_CASE(reg, 0, case_macro);		\
> +		PMEV_CASE(reg, 1, case_macro);		\
> +		PMEV_CASE(reg, 2, case_macro);		\
> +		PMEV_CASE(reg, 3, case_macro);		\
> +		PMEV_CASE(reg, 4, case_macro);		\
> +		PMEV_CASE(reg, 5, case_macro);		\
> +		PMEV_CASE(reg, 6, case_macro);		\
> +		PMEV_CASE(reg, 7, case_macro);		\
> +		default:	\
> +			WARN(1, "Invalid PMEV* event index\n");	\
> +		}									\
> +	} while (0)

Please fix your editor; the indentation of the "\" line continuations
is totally wrong.
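
For illustration only, a toy macro of mine (not from the patch) showing
the usual kernel style: every continuation backslash pushed out to a
common tab stop so they all line up in one column, body indented with
tabs:

	#define TOY_WRAP(x)				\
		do {					\
			(void)(x);			\
		} while (0)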

> +
> +#define PMEV_REG_SWITCH(reg, ev, case_macro)	\
> +	do {										\
> +		switch (reg) {							\
> +		case 0:									\
> +			PMEV_EV_SWITCH(0, ev, case_macro);	\
> +			break;								\
> +		case 1:									\
> +			PMEV_EV_SWITCH(1, ev, case_macro);	\
> +			break;								\
> +		case 2:									\
> +			PMEV_EV_SWITCH(2, ev, case_macro);	\
> +			break;								\
> +		case 3:									\
> +			PMEV_EV_SWITCH(3, ev, case_macro);	\
> +			break;								\
> +		default:								\
> +			WARN(1, "Invalid PMEV* register index\n");	\
> +		}										\
> +	} while (0)

No, please don't do that. This makes the whole thing unmaintainable
due to macro abuse. It also makes the code generation absolutely
horrible, due to the switch nesting. Just look at the disassembly.

The right way to do that is to declare the registers, one after the
other, all 62 of them, and then use the arm64 big switch
*unchanged*. You could even share it between the two
architectures. The codegen is still slightly bad (one big switch),
but the code is now trivial to read.
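
To make the point concrete, this is roughly what the per-register
scheme below expands to for read_pmevcntrn() — a sketch of the
preprocessor output, not something to add to the patch — one flat
case per counter instead of two nested levels of switches:

	static unsigned long read_pmevcntrn(int n)
	{
		switch (n) {
		case 0:  return read_sysreg(PMEVCNTR0);  break;
		case 1:  return read_sysreg(PMEVCNTR1);  break;
		/* ... one case per counter, up to ... */
		case 30: return read_sysreg(PMEVCNTR30); break;
		default: WARN(1, "Invalid PMEV* index\n");
		}
		return 0;
	}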

See below for the actual change.

	M.

diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index 816873c74eda..4d483e055519 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -37,89 +37,132 @@
 #define PMINTENCLR		__ACCESS_CP15(c9,  0, c14, 2)
 #define PMMIR			__ACCESS_CP15(c9,  0, c14, 6)
 #define PMCCFILTR		__ACCESS_CP15(c14, 0, c15, 7)
-#define PMEVCNTR0(n)	__ACCESS_CP15(c14, 0, c8, n)
-#define PMEVCNTR1(n)	__ACCESS_CP15(c14, 0, c9, n)
-#define PMEVCNTR2(n)	__ACCESS_CP15(c14, 0, c10, n)
-#define PMEVCNTR3(n)	__ACCESS_CP15(c14, 0, c11, n)
-#define PMEVTYPER0(n)	__ACCESS_CP15(c14, 0, c12, n)
-#define PMEVTYPER1(n)	__ACCESS_CP15(c14, 0, c13, n)
-#define PMEVTYPER2(n)	__ACCESS_CP15(c14, 0, c14, n)
-#define PMEVTYPER3(n)	__ACCESS_CP15(c14, 0, c15, n)
-
-#define PMEV_EVENTS_PER_REG		8
-#define PMEV_REGISTER(n)		(n / PMEV_EVENTS_PER_REG)
-#define PMEV_EVENT(n)			(n % PMEV_EVENTS_PER_REG)
-
-#define PMEV_CASE(reg, ev, case_macro)	\
-	case ev:							\
-		case_macro(reg, ev);			\
-		break
-
-#define PMEV_EV_SWITCH(reg, ev, case_macro)	\
-	do {									\
-		switch (ev) {						\
-		PMEV_CASE(reg, 0, case_macro);		\
-		PMEV_CASE(reg, 1, case_macro);		\
-		PMEV_CASE(reg, 2, case_macro);		\
-		PMEV_CASE(reg, 3, case_macro);		\
-		PMEV_CASE(reg, 4, case_macro);		\
-		PMEV_CASE(reg, 5, case_macro);		\
-		PMEV_CASE(reg, 6, case_macro);		\
-		PMEV_CASE(reg, 7, case_macro);		\
-		default:	\
-			WARN(1, "Invalid PMEV* event index\n");	\
-		}									\
-	} while (0)
 
-#define PMEV_REG_SWITCH(reg, ev, case_macro)	\
-	do {										\
-		switch (reg) {							\
-		case 0:									\
-			PMEV_EV_SWITCH(0, ev, case_macro);	\
-			break;								\
-		case 1:									\
-			PMEV_EV_SWITCH(1, ev, case_macro);	\
-			break;								\
-		case 2:									\
-			PMEV_EV_SWITCH(2, ev, case_macro);	\
-			break;								\
-		case 3:									\
-			PMEV_EV_SWITCH(3, ev, case_macro);	\
-			break;								\
-		default:								\
-			WARN(1, "Invalid PMEV* register index\n");	\
-		}										\
+#define PMEVCNTR0		__ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1		__ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2		__ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3		__ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4		__ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5		__ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6		__ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7		__ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8		__ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9		__ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10		__ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11		__ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12		__ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13		__ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14		__ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15		__ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16		__ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17		__ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18		__ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19		__ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20		__ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21		__ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22		__ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23		__ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24		__ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25		__ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26		__ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27		__ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28		__ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29		__ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30		__ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0		__ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1		__ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2		__ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3		__ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4		__ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5		__ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6		__ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7		__ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8		__ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9		__ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10		__ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11		__ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12		__ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13		__ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14		__ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15		__ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16		__ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17		__ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18		__ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19		__ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20		__ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21		__ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22		__ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23		__ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24		__ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25		__ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26		__ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27		__ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28		__ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29		__ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30		__ACCESS_CP15(c14, 0, c15, 6)
+
+#define PMEVN_CASE(n, case_macro) \
+	case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro)				\
+	do {							\
+		switch (x) {					\
+		PMEVN_CASE(0,  case_macro);			\
+		PMEVN_CASE(1,  case_macro);			\
+		PMEVN_CASE(2,  case_macro);			\
+		PMEVN_CASE(3,  case_macro);			\
+		PMEVN_CASE(4,  case_macro);			\
+		PMEVN_CASE(5,  case_macro);			\
+		PMEVN_CASE(6,  case_macro);			\
+		PMEVN_CASE(7,  case_macro);			\
+		PMEVN_CASE(8,  case_macro);			\
+		PMEVN_CASE(9,  case_macro);			\
+		PMEVN_CASE(10, case_macro);			\
+		PMEVN_CASE(11, case_macro);			\
+		PMEVN_CASE(12, case_macro);			\
+		PMEVN_CASE(13, case_macro);			\
+		PMEVN_CASE(14, case_macro);			\
+		PMEVN_CASE(15, case_macro);			\
+		PMEVN_CASE(16, case_macro);			\
+		PMEVN_CASE(17, case_macro);			\
+		PMEVN_CASE(18, case_macro);			\
+		PMEVN_CASE(19, case_macro);			\
+		PMEVN_CASE(20, case_macro);			\
+		PMEVN_CASE(21, case_macro);			\
+		PMEVN_CASE(22, case_macro);			\
+		PMEVN_CASE(23, case_macro);			\
+		PMEVN_CASE(24, case_macro);			\
+		PMEVN_CASE(25, case_macro);			\
+		PMEVN_CASE(26, case_macro);			\
+		PMEVN_CASE(27, case_macro);			\
+		PMEVN_CASE(28, case_macro);			\
+		PMEVN_CASE(29, case_macro);			\
+		PMEVN_CASE(30, case_macro);			\
+		default: WARN(1, "Invalid PMEV* index\n");	\
+		}						\
 	} while (0)
 
-#define RETURN_READ_PMEVCNTR(reg, ev) \
-	return read_sysreg(PMEVCNTR##reg(ev))
+#define RETURN_READ_PMEVCNTRN(n) \
+	return read_sysreg(PMEVCNTR##n)
 static unsigned long read_pmevcntrn(int n)
 {
-	const int reg = PMEV_REGISTER(n);
-	const int event = PMEV_EVENT(n);
-
-	PMEV_REG_SWITCH(reg, event, RETURN_READ_PMEVCNTR);
+	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
 	return 0;
 }
 
-#define WRITE_PMEVCNTR(reg, ev) \
-	write_sysreg(val, PMEVCNTR##reg(ev))
+#define WRITE_PMEVCNTRN(n) \
+	write_sysreg(val, PMEVCNTR##n)
 static void write_pmevcntrn(int n, unsigned long val)
 {
-	const int reg = PMEV_REGISTER(n);
-	const int event = PMEV_EVENT(n);
-
-	PMEV_REG_SWITCH(reg, event, WRITE_PMEVCNTR);
+	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
-#define WRITE_PMEVTYPER(reg, ev) \
-	write_sysreg(val, PMEVTYPER##reg(ev))
+#define WRITE_PMEVTYPERN(n) \
+	write_sysreg(val, PMEVTYPER##n)
 static void write_pmevtypern(int n, unsigned long val)
 {
-	const int reg = PMEV_REGISTER(n);
-	const int event = PMEV_EVENT(n);
-
-	PMEV_REG_SWITCH(reg, event, WRITE_PMEVTYPER);
+	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
 
 static inline unsigned long read_pmmir(void)

-- 
Without deviation from the norm, progress is not possible.


