[PATCH RFC v1] arm64: Handle traps from accessing CNTVCT/CNTFRQ for CONFIG_COMPAT
Marc Zyngier
marc.zyngier at arm.com
Thu Jan 11 00:51:37 PST 2018
On 11/01/18 04:43, Nicolin Chen wrote:
> CONFIG_COMPAT allows an ARM64 machine to run 32-bit instructions.
> Since ARCH_TIMER_USR_VCT_ACCESS_EN may be disabled when a timer
> workaround is detected, accessing CNTVCT via the 32-bit MRRC
> instruction will also trigger a trap.
>
> So this patch adds support for handling this situation.
>
> Tested with a user program built with a 32-bit compiler:
> int main()
> {
> 	unsigned long long cval;
> 	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
> 	return 0;
> }
>
> Signed-off-by: Nicolin Chen <nicoleotsuka at gmail.com>
> ---
>
> [ I also added CNTFRQ here for safety, as in theory it could
> trigger the trap as well. However, my other test case (with an
> MRC instruction, roughly sketched below) doesn't seem to trigger
> a trap, so I would drop it in the next version if someone can
> confirm it's not required. Thanks -- Nicolin ]
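>
> [ A rough sketch of that MRC test case (not the exact program, but
>   CNTFRQ is read in AArch32 with "mrc p15, 0, <Rt>, c14, c0, 0")
>   would look something like:
>
> int main()
> {
> 	unsigned int frq;
> 	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (frq));
> 	return 0;
> } ]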
See my previous series on this very subject[1] as well as Will's reply.
>
> arch/arm64/include/asm/esr.h | 25 +++++++++++++++++++
> arch/arm64/include/asm/ptrace.h | 15 ++++++++++++
> arch/arm64/kernel/entry.S | 4 ++--
> arch/arm64/kernel/traps.c | 53 +++++++++++++++++++++++++++++++++++++++--
> 4 files changed, 93 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 014d7d8..55dea62 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -220,6 +220,31 @@
> (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
> ESR_ELx_SYS64_ISS_OP2_SHIFT))
>
> +/* ISS fields for MRC and MRS are very similar, so reuse SYS64 macros */
> +#define ESR_ELx_CP15_32_ISS_MRC_MASK ESR_ELx_SYS64_ISS_SYS_MASK
> +#define ESR_ELx_CP15_32_ISS_MRC_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(0, 0, 0, 14, 0) | \
> + ESR_ELx_SYS64_ISS_DIR_READ)
> +
> +/* ISS field definitions for CP15 MRRC/MCRR instructions (same for CP14) */
> +#define ESR_ELx_CP15_64_ISS_OPC1_SHIFT 16
> +#define ESR_ELx_CP15_64_ISS_OPC1_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_OPC1_SHIFT)
> +#define ESR_ELx_CP15_64_ISS_RT2_SHIFT 10
> +#define ESR_ELx_CP15_64_ISS_RT2_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)
> +#define ESR_ELx_CP15_64_ISS_RT_SHIFT 5
> +#define ESR_ELx_CP15_64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)
> +#define ESR_ELx_CP15_64_ISS_CRM_SHIFT 1
> +#define ESR_ELx_CP15_64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)
> +#define ESR_ELx_CP15_64_ISS_MRRC_MASK (ESR_ELx_CP15_64_ISS_OPC1_MASK | \
> + ESR_ELx_CP15_64_ISS_CRM_MASK | \
> + ESR_ELx_SYS64_ISS_DIR_MASK)
> +
> +#define ESR_ELx_CP15_64_ISS_MRRC_VAL(opc1, crm) \
> + (((opc1) << ESR_ELx_CP15_64_ISS_OPC1_SHIFT) | \
> + ((crm) << ESR_ELx_CP15_64_ISS_CRM_SHIFT))
> +
> +#define ESR_ELx_CP15_64_ISS_MRRC_CNTVCT (ESR_ELx_CP15_64_ISS_MRRC_VAL(1, 14) | \
> + ESR_ELx_SYS64_ISS_DIR_READ)
> +
> #ifndef __ASSEMBLY__
> #include <asm/types.h>
>
> diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
> index 6069d66..50caf11 100644
> --- a/arch/arm64/include/asm/ptrace.h
> +++ b/arch/arm64/include/asm/ptrace.h
> @@ -243,6 +243,21 @@ static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
> regs->regs[r] = val;
> }
>
> +/*
> + * Write two registers given the architectural register indices r and r2.
> + * Used by 32-bit MRRC and MCRR instructions for 64-bit results.
> + */
> +static inline void pt_regs_write_regs(struct pt_regs *regs, int r, int r2,
> + unsigned long val)
> +{
> + if (r != 31 && r2 != 31) {
> + /* Save lower 32 bits to register r */
> + regs->regs[r] = val & 0xffffffff;
> + /* Save higher 32 bits to register r2 */
> + regs->regs[r2] = val >> 32;
> + }
> +}
> +
> /* Valid only for Kernel mode traps. */
> static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
> {
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 6d14b8f..9d6cd95 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -636,9 +636,9 @@ el0_sync_compat:
> cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
> b.eq el0_undef
> cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
> - b.eq el0_undef
> + b.eq el0_sys
> cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
> - b.eq el0_undef
> + b.eq el0_sys
> cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
> b.eq el0_undef
> cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
> index 3d3588f..211cce7 100644
> --- a/arch/arm64/kernel/traps.c
> +++ b/arch/arm64/kernel/traps.c
> @@ -454,6 +454,17 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
> arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
> }
>
> +#ifdef CONFIG_COMPAT
> +static void cntvct_read_32_handler(unsigned int esr, struct pt_regs *regs)
> +{
> + int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
> + int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
> +
> + pt_regs_write_regs(regs, rt, rt2, arch_counter_get_cntvct());
> + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
> +}
> +#endif
> +
> static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
> {
> int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
> @@ -495,11 +506,49 @@ static struct sys64_hook sys64_hooks[] = {
> {},
> };
>
> +#ifdef CONFIG_COMPAT
> +static struct sys64_hook cp15_32_hooks[] = {
> + {
> + /* Trap read access to CNTFRQ via the 32-bit MRC instruction */
> + .esr_mask = ESR_ELx_CP15_32_ISS_MRC_MASK,
> + .esr_val = ESR_ELx_CP15_32_ISS_MRC_CNTFRQ,
> + .handler = cntfrq_read_handler,
> + },
> + {},
> +};
> +
> +static struct sys64_hook cp15_64_hooks[] = {
> + {
> + /* Trap read access to CNTVCT via the 32-bit MRRC instruction */
> + .esr_mask = ESR_ELx_CP15_64_ISS_MRRC_MASK,
> + .esr_val = ESR_ELx_CP15_64_ISS_MRRC_CNTVCT,
> + .handler = cntvct_read_32_handler,
> + },
> + {},
> +};
> +#endif
> +
> asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
> {
> - struct sys64_hook *hook;
> + struct sys64_hook *hook = NULL;
> +
> + switch (ESR_ELx_EC(esr)) {
> +#ifdef CONFIG_COMPAT
> + case ESR_ELx_EC_CP15_32:
> + hook = cp15_32_hooks;
> + break;
> + case ESR_ELx_EC_CP15_64:
> + hook = cp15_64_hooks;
> + break;
> +#endif
> + case ESR_ELx_EC_SYS64:
> + hook = sys64_hooks;
> + break;
> + default:
> + break;
> + }
>
> - for (hook = sys64_hooks; hook->handler; hook++)
> + for (; hook && hook->handler; hook++)
> if ((hook->esr_mask & esr) == hook->esr_val) {
> hook->handler(esr, regs);
> return;
>
Also, this code is fairly broken in its handling of conditional
instructions.
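To make that concrete: for these EC classes the ISS also carries a CV bit
(bit 24) and a COND field (bits 23:20), so before emulating anything the
handler needs to evaluate the reported condition against PSTATE.NZCV (and
cope with CV being 0, e.g. for some T32 encodings inside an IT block). A
rough sketch of what that check involves is below; the helper name and
exact shape are only illustrative:

/*
 * Illustrative only: evaluate the AArch32 condition code reported in the
 * ESR ISS (CV at bit 24, COND at bits 23:20) against the NZCV flags held
 * in PSTATE. Returns true if the trapped instruction should be emulated.
 */
static bool aarch32_trap_cond_passed(unsigned int esr, unsigned long pstate)
{
	unsigned int cond = (esr >> 20) & 0xf;
	unsigned int nzcv = (pstate >> 28) & 0xf;
	bool n = nzcv & 0x8, z = nzcv & 0x4, c = nzcv & 0x2, v = nzcv & 0x1;
	bool res;

	/* CV == 0: no condition in the ISS; the insn/ITSTATE must be used */
	if (!(esr & (1U << 24)))
		return true;

	switch (cond >> 1) {
	case 0: res = z; break;			/* EQ / NE */
	case 1: res = c; break;			/* CS / CC */
	case 2: res = n; break;			/* MI / PL */
	case 3: res = v; break;			/* VS / VC */
	case 4: res = c && !z; break;		/* HI / LS */
	case 5: res = (n == v); break;		/* GE / LT */
	case 6: res = (n == v) && !z; break;	/* GT / LE */
	default: return true;			/* AL */
	}

	return (cond & 1) ? !res : res;
}

When a check along those lines fails, the handler should skip the
instruction without writing any destination register (and an instruction
inside an IT block also needs the ITSTATE advanced, which the sketch above
does not do).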
Thanks,
M.
[1]
http://lists.infradead.org/pipermail/linux-arm-kernel/2017-July/520426.html
--
Jazz is not dead. It just smells funny...