[PATCH v1 4/4] arm64/mte: Add userspace interface for enabling asymmetric mode

Vincenzo Frascino vincenzo.frascino at arm.com
Fri Jan 28 09:12:20 PST 2022



On 1/27/22 7:57 PM, Mark Brown wrote:
> The architecture provides an asymmetric mode for MTE where tag mismatches
> are checked asynchronously for reads but synchronously for loads. 

MTE3 (asymmetric mode) checks reads synchronously and writes asynchronously.

Nit: Please use load/store or read/write.


> Allow userspace processes to select this and make it available as a default mode
> via the existing per-CPU sysfs interface.
> 
> Since the PR_MTE_TCF_ values are a bitmask (allowing the kernel to choose
> between the multiple modes) and there are no free bits adjacent to the
> existing PR_MTE_TCF_ bits the set of bits used to specify the mode becomes
> disjoint. Programs using the new interface should be aware of this and
> programs that do not use it will not see any change in behaviour.
> 
> When userspace requests two possible modes but the system default for the
> CPU is the third mode (eg, default is synchronous but userspace requests
> either asynchronous or asymmetric) the preference order is:
> 
>    ASYMM > ASYNC > SYNC
> 
> This situation is not currently possible since there are only two modes and
> it is mandatory to have a system default so there could be no ambiguity and
> there is no ABI change. The chosen order is basically arbitrary as we do not
> have a clear metric for what is better here.
> 
> If userspace requests specifically asymmetric mode via the prctl() and the
> system does not support it then we will return an error, this mirrors
> how we handle the case where userspace enables MTE on a system that does
> not support MTE at all and the behaviour that will be seen if running on
> an older kernel that does not support userspace use of asymmetric mode.
> 
> Attempts to set asymmetric mode as the default mode will result in an error
> if the system does not support it.
> 
> Signed-off-by: Mark Brown <broonie at kernel.org>

Otherwise:

Reviewed-by: Vincenzo Frascino <Vincenzo.Frascino at arm.com>

> ---
>  arch/arm64/include/asm/processor.h |  1 +
>  arch/arm64/kernel/mte.c            | 12 +++++++++++-
>  arch/arm64/kernel/process.c        |  5 ++++-
>  include/uapi/linux/prctl.h         |  4 +++-
>  4 files changed, 19 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index 6f41b65f9962..73e38d9a540c 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -21,6 +21,7 @@
>  
>  #define MTE_CTRL_TCF_SYNC		(1UL << 16)
>  #define MTE_CTRL_TCF_ASYNC		(1UL << 17)
> +#define MTE_CTRL_TCF_ASYMM		(1UL << 18)
>  
>  #ifndef __ASSEMBLY__
>  
> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index fa4001fee12a..fb777d8fea32 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -215,7 +215,9 @@ static void mte_update_sctlr_user(struct task_struct *task)
>  	 * set bits and map into register values determines our
>  	 * default order.
>  	 */
> -	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
> +	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
> +		sctlr |= SCTLR_EL1_TCF0_ASYMM;
> +	else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
>  		sctlr |= SCTLR_EL1_TCF0_ASYNC;
>  	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
>  		sctlr |= SCTLR_EL1_TCF0_SYNC;
> @@ -306,6 +308,8 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
>  		mte_ctrl |= MTE_CTRL_TCF_ASYNC;
>  	if (arg & PR_MTE_TCF_SYNC)
>  		mte_ctrl |= MTE_CTRL_TCF_SYNC;
> +	if (arg & PR_MTE_TCF_ASYMM)
> +		mte_ctrl |= MTE_CTRL_TCF_ASYMM;
>  
>  	task->thread.mte_ctrl = mte_ctrl;
>  	if (task == current) {
> @@ -334,6 +338,8 @@ long get_mte_ctrl(struct task_struct *task)
>  		ret |= PR_MTE_TCF_ASYNC;
>  	if (mte_ctrl & MTE_CTRL_TCF_SYNC)
>  		ret |= PR_MTE_TCF_SYNC;
> +	if (mte_ctrl & MTE_CTRL_TCF_ASYMM)
> +		ret |= PR_MTE_TCF_ASYMM;
>  
>  	return ret;
>  }
> @@ -481,6 +487,8 @@ static ssize_t mte_tcf_preferred_show(struct device *dev,
>  		return sysfs_emit(buf, "async\n");
>  	case MTE_CTRL_TCF_SYNC:
>  		return sysfs_emit(buf, "sync\n");
> +	case MTE_CTRL_TCF_ASYMM:
> +		return sysfs_emit(buf, "asymm\n");
>  	default:
>  		return sysfs_emit(buf, "???\n");
>  	}
> @@ -496,6 +504,8 @@ static ssize_t mte_tcf_preferred_store(struct device *dev,
>  		tcf = MTE_CTRL_TCF_ASYNC;
>  	else if (sysfs_streq(buf, "sync"))
>  		tcf = MTE_CTRL_TCF_SYNC;
> +	else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
> +		tcf = MTE_CTRL_TCF_ASYMM;
>  	else
>  		return -EINVAL;
>  
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 5369e649fa79..941cfa7117b9 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -635,7 +635,10 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
>  		return -EINVAL;
>  
>  	if (system_supports_mte())
> -		valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
> +		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
> +			| PR_MTE_TAG_MASK;
> +	if (cpus_have_cap(ARM64_MTE_ASYMM))
> +		valid_mask |= PR_MTE_TCF_ASYMM;
>  
>  	if (arg & ~valid_mask)
>  		return -EINVAL;
> diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
> index e998764f0262..4ae2b21e4066 100644
> --- a/include/uapi/linux/prctl.h
> +++ b/include/uapi/linux/prctl.h
> @@ -238,7 +238,9 @@ struct prctl_mm_map {
>  # define PR_MTE_TCF_NONE		0UL
>  # define PR_MTE_TCF_SYNC		(1UL << 1)
>  # define PR_MTE_TCF_ASYNC		(1UL << 2)
> -# define PR_MTE_TCF_MASK		(PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
> +# define PR_MTE_TCF_ASYMM		(1UL << 19)
> +# define PR_MTE_TCF_MASK		(PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC | \
> +					 PR_MTE_TCF_ASYMM)
>  /* MTE tag inclusion mask */
>  # define PR_MTE_TAG_SHIFT		3
>  # define PR_MTE_TAG_MASK		(0xffffUL << PR_MTE_TAG_SHIFT)
> 

-- 
Regards,
Vincenzo



More information about the linux-arm-kernel mailing list