[PATCH v4 08/13] perf: RISC-V: Introduce Andes PMU for perf event sampling
Lad, Prabhakar
prabhakar.csengg at gmail.com
Fri Nov 24 07:06:41 PST 2023
On Wed, Nov 22, 2023 at 12:18 PM Yu Chien Peter Lin
<peterlin at andestech.com> wrote:
>
> The Andes PMU extension provides the same mechanism as Sscofpmf,
> allowing us to reuse the SBI PMU driver to support event sampling
> and mode filtering.
>
> To make use of this custom PMU extension, "xandespmu" needs
> to be appended to the riscv,isa-extensions property of each
> cpu node in the device tree, and CONFIG_ANDES_CUSTOM_PMU
> needs to be enabled.
>
> Signed-off-by: Yu Chien Peter Lin <peterlin at andestech.com>
> Reviewed-by: Charles Ci-Jyun Wu <dminus at andestech.com>
> Reviewed-by: Leo Yu-Chi Liang <ycliang at andestech.com>
> Co-developed-by: Locus Wei-Han Chen <locus84 at andestech.com>
> Signed-off-by: Locus Wei-Han Chen <locus84 at andestech.com>
> ---
> Changes v1 -> v2:
> - New patch
> Changes v2 -> v3:
> - Reordered list in riscv_isa_ext[]
> - Removed mvendorid check in pmu_sbi_setup_irqs()
> Changes v3 -> v4:
> - No change
> ---
> arch/riscv/include/asm/hwcap.h | 1 +
> arch/riscv/kernel/cpufeature.c | 1 +
> drivers/perf/Kconfig | 14 ++++++++++++++
> drivers/perf/riscv_pmu_sbi.c | 30 +++++++++++++++++++++++++-----
> 4 files changed, 41 insertions(+), 5 deletions(-)
>
Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj at bp.renesas.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj at bp.renesas.com>
Cheers,
Prabhakar
> diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
> index c85ee34c78d9..cbfbc3505d2c 100644
> --- a/arch/riscv/include/asm/hwcap.h
> +++ b/arch/riscv/include/asm/hwcap.h
> @@ -58,6 +58,7 @@
> #define RISCV_ISA_EXT_SMSTATEEN 43
> #define RISCV_ISA_EXT_ZICOND 44
> #define RISCV_ISA_EXT_XTHEADPMU 45
> +#define RISCV_ISA_EXT_XANDESPMU 46
>
> #define RISCV_ISA_EXT_MAX 64
>
> diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
> index e606f588d366..42fda134c4a3 100644
> --- a/arch/riscv/kernel/cpufeature.c
> +++ b/arch/riscv/kernel/cpufeature.c
> @@ -185,6 +185,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
> __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
> __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
> __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
> + __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_EXT_XANDESPMU),
> __RISCV_ISA_EXT_DATA(xtheadpmu, RISCV_ISA_EXT_XTHEADPMU),
> };
>
> diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
> index c71b6f16bdfa..c1a490829d15 100644
> --- a/drivers/perf/Kconfig
> +++ b/drivers/perf/Kconfig
> @@ -86,6 +86,20 @@ config RISCV_PMU_SBI
> full perf feature support i.e. counter overflow, privilege mode
> filtering, counter configuration.
>
> +config ANDES_CUSTOM_PMU
> + bool "Andes custom PMU support"
> + depends on RISCV_ALTERNATIVE && RISCV_PMU_SBI
> + default y
> + help
> + The Andes cores implement a PMU overflow extension very
> + similar to the core SSCOFPMF extension.
> +
> + This will patch the overflow/pending CSR and handle the
> + non-standard behaviour via the regular SBI PMU driver and
> + interface.
> +
> + If you don't know what to do here, say "Y".
> +
> config THEAD_CUSTOM_PMU
> bool "T-Head custom PMU support"
> depends on RISCV_ALTERNATIVE && RISCV_PMU_SBI
> diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
> index 31ca79846399..1e0c709efbfc 100644
> --- a/drivers/perf/riscv_pmu_sbi.c
> +++ b/drivers/perf/riscv_pmu_sbi.c
> @@ -19,6 +19,7 @@
> #include <linux/of.h>
> #include <linux/cpu_pm.h>
> #include <linux/sched/clock.h>
> +#include <linux/soc/andes/irq.h>
>
> #include <asm/sbi.h>
> #include <asm/cpufeature.h>
> @@ -27,14 +28,26 @@
> #define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
>
> #define ALT_SBI_PMU_OVERFLOW(__ovl) \
> -asm volatile(ALTERNATIVE( \
> +asm volatile(ALTERNATIVE_2( \
> "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
> "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
> 0, RISCV_ISA_EXT_XTHEADPMU, \
> - CONFIG_THEAD_CUSTOM_PMU) \
> + CONFIG_THEAD_CUSTOM_PMU, \
> + "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
> + 0, RISCV_ISA_EXT_XANDESPMU, \
> + CONFIG_ANDES_CUSTOM_PMU) \
> : "=r" (__ovl) : \
> : "memory")
>
> +#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \
> +asm volatile(ALTERNATIVE( \
> + "csrc " __stringify(CSR_IP) ", %0\n\t", \
> + "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
> + 0, RISCV_ISA_EXT_XANDESPMU, \
> + CONFIG_ANDES_CUSTOM_PMU) \
> + : : "r"(__irq_mask) \
> + : "memory")
> +
> #define SYSCTL_NO_USER_ACCESS 0
> #define SYSCTL_USER_ACCESS 1
> #define SYSCTL_LEGACY 2
> @@ -72,6 +85,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
> static union sbi_pmu_ctr_info *pmu_ctr_list;
> static bool riscv_pmu_use_irq;
> static unsigned int riscv_pmu_irq_num;
> +static unsigned int riscv_pmu_irq_mask;
> static unsigned int riscv_pmu_irq;
>
> /* Cache the available counters in a bitmask */
> @@ -705,7 +719,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
>
> event = cpu_hw_evt->events[fidx];
> if (!event) {
> - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> + ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
> return IRQ_NONE;
> }
>
> @@ -719,7 +733,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
> * Overflow interrupt pending bit should only be cleared after stopping
> * all the counters to avoid any race condition.
> */
> - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
> + ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
>
> /* No overflow bit is set */
> if (!overflow)
> @@ -791,7 +805,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
>
> if (riscv_pmu_use_irq) {
> cpu_hw_evt->irq = riscv_pmu_irq;
> - csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
> + ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
> enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
> }
>
> @@ -823,8 +837,14 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
> IS_ENABLED(CONFIG_THEAD_CUSTOM_PMU)) {
> riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
> riscv_pmu_use_irq = true;
> + } else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
> + IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
> + riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMU;
> + riscv_pmu_use_irq = true;
> }
>
> + riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
> +
> if (!riscv_pmu_use_irq)
> return -EOPNOTSUPP;
>
> --
> 2.34.1
>
>
More information about the linux-riscv
mailing list