[PATCH v3 1/3] arm64: ptrauth: add pointer authentication Armv8.6 enhanced feature

Amit Daniel Kachhap amit.kachhap at arm.com
Tue Jun 23 09:16:28 EDT 2020


Hi,

On 6/22/20 7:52 PM, Dave Martin wrote:
> On Thu, Jun 18, 2020 at 10:40:27AM +0530, Amit Daniel Kachhap wrote:
>> This patch adds the changes for the Pointer Authentication enhanced
>> features which are mandatory for Armv8.6. These features are:
>>
>> * An enhanced PAC generation logic which makes it harder to recover the
>>    correct PAC value of an authenticated pointer. No code change is
>>    needed for this.
>>
>> * A fault (FPAC) is now generated when a ptrauth authentication
>>    instruction fails to authenticate the PAC present in the address.
>>    This differs from the earlier behaviour, where such a failure merely
>>    wrote an error code into the top bits of the pointer and relied on a
>>    subsequent load/store to abort. The ptrauth instructions which may
>>    cause this fault include autiasp, retaa, etc.
>>
>> The above features are now represented by additional configurations
>> for the Address Authentication cpufeature.
>>
>> The FPAC fault received in the kernel is treated as an illegal
>> instruction, so signal SIGILL is injected with ILL_ILLOPN as the signal
>> code. Note that this differs from the earlier Armv8.3 ptrauth behaviour,
>> where pointer authentication failures resulted in signal SIGSEGV.
> 
> Seems reasonable.  It's a little unfortunate that the behaviour changes
> here, but I don't see what alternative we have.  And getting a
> synchronous fault is certainly better for debugging.
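
For illustration only (untested, not part of this patch, and assuming a
CPU with address authentication enabled for userspace plus a toolchain
accepting -march=armv8.3-a), a snippet like the one below shows the
user-visible difference: without FPAC the corrupted pointer only faults
at the later dereference (SIGSEGV), while with FPAC the autia itself
traps and the task now gets SIGILL:

#include <stdint.h>

int main(void)
{
	uint64_t val = 42;
	uint64_t ptr = (uint64_t)&val;
	uint64_t mod = 0;	/* arbitrary modifier, just for the demo */

	/* Sign the pointer with the IA key, then flip one bit that lies
	 * in the PAC field for common VA sizes. */
	asm volatile("pacia %0, %1" : "+r" (ptr) : "r" (mod));
	ptr ^= 1UL << 53;
	asm volatile("autia %0, %1" : "+r" (ptr) : "r" (mod));

	/*
	 * Pre-Armv8.6: the failed autia above only poisons the top bits,
	 * so the fault is deferred to this load (SIGSEGV).  With FPAC the
	 * autia traps immediately and the task receives SIGILL/ILL_ILLOPN.
	 */
	return (int)*(volatile uint64_t *)ptr;
}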
> 
>>
>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap at arm.com>
>> ---
>> No change since v2.
>>
>>   arch/arm64/include/asm/esr.h       |  4 +++-
>>   arch/arm64/include/asm/exception.h |  1 +
>>   arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
>>   arch/arm64/kernel/entry-common.c   | 24 ++++++++++++++++++++++++
>>   arch/arm64/kernel/traps.c          |  9 +++++++++
>>   5 files changed, 53 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..22c81f1edda2 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -35,7 +35,9 @@
>>   #define ESR_ELx_EC_SYS64	(0x18)
>>   #define ESR_ELx_EC_SVE		(0x19)
>>   #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
>> -/* Unallocated EC: 0x1b - 0x1E */
>> +/* Unallocated EC: 0x1B */
>> +#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
>> +/* Unallocated EC: 0x1D - 0x1E */
>>   #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
>>   #define ESR_ELx_EC_IABT_LOW	(0x20)
>>   #define ESR_ELx_EC_IABT_CUR	(0x21)
>> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
>> index 7577a754d443..3140b90f2e4f 100644
>> --- a/arch/arm64/include/asm/exception.h
>> +++ b/arch/arm64/include/asm/exception.h
>> @@ -47,4 +47,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
>>   void do_cp15instr(unsigned int esr, struct pt_regs *regs);
>>   void do_el0_svc(struct pt_regs *regs);
>>   void do_el0_svc_compat(struct pt_regs *regs);
>> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
>>   #endif	/* __ASM_EXCEPTION_H */
>> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
>> index 463175f80341..c71bcd0c002a 100644
>> --- a/arch/arm64/include/asm/sysreg.h
>> +++ b/arch/arm64/include/asm/sysreg.h
>> @@ -633,14 +633,22 @@
>>   #define ID_AA64ISAR1_APA_SHIFT		4
>>   #define ID_AA64ISAR1_DPB_SHIFT		0
>>   
>> -#define ID_AA64ISAR1_APA_NI		0x0
>> -#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
>> -#define ID_AA64ISAR1_API_NI		0x0
>> -#define ID_AA64ISAR1_API_IMP_DEF	0x1
>> -#define ID_AA64ISAR1_GPA_NI		0x0
>> -#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
>> -#define ID_AA64ISAR1_GPI_NI		0x0
>> -#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
>> +#define ID_AA64ISAR1_APA_NI			0x0
>> +#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
>> +#define ID_AA64ISAR1_API_NI			0x0
>> +#define ID_AA64ISAR1_API_IMP_DEF		0x1
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
>> +#define ID_AA64ISAR1_GPA_NI			0x0
>> +#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
>> +#define ID_AA64ISAR1_GPI_NI			0x0
>> +#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
>>   
>>   /* id_aa64pfr0 */
>>   #define ID_AA64PFR0_CSV3_SHIFT		60
>> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
>> index 3dbdf9752b11..08a4805a5cca 100644
>> --- a/arch/arm64/kernel/entry-common.c
>> +++ b/arch/arm64/kernel/entry-common.c
>> @@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
>>   }
>>   NOKPROBE_SYMBOL(el1_dbg);
>>   
>> +static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
>> +{
>> +	local_daif_inherit(regs);
>> +	do_ptrauth_fault(regs, esr);
>> +}
>> +NOKPROBE_SYMBOL(el1_fpac);
>> +
>>   asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
>>   {
>>   	unsigned long esr = read_sysreg(esr_el1);
>> @@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
>>   	case ESR_ELx_EC_BRK64:
>>   		el1_dbg(regs, esr);
>>   		break;
>> +	case ESR_ELx_EC_FPAC:
>> +		el1_fpac(regs, esr);
>> +		break;
>>   	default:
>>   		el1_inv(regs, esr);
>>   	}
>> @@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs)
>>   }
>>   NOKPROBE_SYMBOL(el0_svc);
>>   
>> +static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
>> +{
>> +	user_exit_irqoff();
>> +	local_daif_restore(DAIF_PROCCTX);
>> +	do_ptrauth_fault(regs, esr);
>> +}
>> +NOKPROBE_SYMBOL(el0_fpac);
>> +
>>   asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>>   {
>>   	unsigned long esr = read_sysreg(esr_el1);
>> @@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>>   	case ESR_ELx_EC_BRK64:
>>   		el0_dbg(regs, esr);
>>   		break;
>> +	case ESR_ELx_EC_FPAC:
>> +		el0_fpac(regs, esr);
>> +		break;
>>   	default:
>>   		el0_inv(regs, esr);
>>   	}
>> @@ -335,6 +356,9 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
>>   	case ESR_ELx_EC_BKPT32:
>>   		el0_dbg(regs, esr);
>>   		break;
>> +	case ESR_ELx_EC_FPAC:
>> +		el0_fpac(regs, esr);
>> +		break;
> 
> Can this exception ever happen?  I thought ptrauth doesn't exist for
> AArch32.

This path will be taken for an FPAC fault from userspace (EL0). Am I
missing something here?

> 
>>   	default:
>>   		el0_inv(regs, esr);
>>   	}
>> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
>> index 50cc30acf106..f596ef585560 100644
>> --- a/arch/arm64/kernel/traps.c
>> +++ b/arch/arm64/kernel/traps.c
>> @@ -479,6 +479,14 @@ void do_bti(struct pt_regs *regs)
>>   }
>>   NOKPROBE_SYMBOL(do_bti);
>>   
>> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
>> +{
>> +	BUG_ON(!user_mode(regs));
> 
> Doesn't el1_fpac() call this?  How does this interact with in-kernel
> pointer auth?

Yes, it is called from el1_fpac(). Currently we crash the kernel when an
FPAC fault occurs in kernel mode, similar to the do_undefinstr()
implementation. Alternatively, the crash could be triggered from
el1_fpac() itself.
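
If we wanted to avoid the BUG_ON() for the in-kernel case, one possible
shape (untested sketch, not what this patch does) would be to handle
kernel-mode faults explicitly in do_ptrauth_fault() and die there, e.g.:

void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
{
	if (user_mode(regs)) {
		arm64_notify_die("pointer authentication fault", regs,
				 SIGILL, ILL_ILLOPN,
				 (void __user *)regs->pc, esr);
	} else {
		/* In-kernel authentication failure: no way to recover. */
		die("Oops - pointer authentication fault", regs, esr);
	}
}
NOKPROBE_SYMBOL(do_ptrauth_fault);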

Thanks,
Amit

> 
>> +	arm64_notify_die("pointer authentication fault", regs, SIGILL,
>> +			 ILL_ILLOPN, (void __user *)regs->pc, esr);
>> +}
>> +NOKPROBE_SYMBOL(do_ptrauth_fault);
> 
> [...]
> 
> Cheers
> ---Dave
> 


