[PATCH v4 1/4] mm: modify pte format for Svnapot
Conor.Dooley at microchip.com
Conor.Dooley at microchip.com
Mon Aug 22 13:45:07 PDT 2022
Hey Qinglin Pan,
Couple comments below.
On 22/08/2022 16:34, panqinglin2020 at iscas.ac.cn wrote:
> EXTERNAL EMAIL: Do not click links or open attachments unless you know the content is safe
>
> From: Qinglin Pan <panqinglin2020 at iscas.ac.cn>
>
> This commit adds two errata entries to enable/disable Svnapot support,
> patching code dynamically when "svnapot" is in the "riscv,isa" field of the
> fdt and the SVNAPOT compile option is set. This influences the behavior of
> the has_svnapot function and the pte_pfn function. All code dependent on
> Svnapot should first make sure that has_svnapot returns true.
>
> Also, this commit modifies PTE definition for Svnapot, and creates some
> functions in pgtable.h to mark a PTE as napot and check if it is a Svnapot
> PTE. Until now, only the 64KB napot size is supported in the draft spec, so
> some macros have only a 64KB version.
>
> Signed-off-by: Qinglin Pan <panqinglin2020 at iscas.ac.cn>
>
> diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
> index dc42375c2357..a23b71cf5979 100644
> --- a/arch/riscv/include/asm/pgtable-64.h
> +++ b/arch/riscv/include/asm/pgtable-64.h
> @@ -74,6 +74,20 @@ typedef struct {
> */
> #define _PAGE_PFN_MASK GENMASK(53, 10)
>
> +/*
> + * [63] Svnapot definitions:
> + * 0 Svnapot disabled
> + * 1 Svnapot enabled
> + */
> +#define _PAGE_NAPOT_SHIFT 63
> +#define _PAGE_NAPOT (1UL << _PAGE_NAPOT_SHIFT)
Is there any reason not to just make this BIT(_PAGE_NAPOT_SHIFT)?
> +#define NAPOT_CONT64KB_ORDER 4UL
> +#define NAPOT_CONT64KB_SHIFT (NAPOT_CONT64KB_ORDER + PAGE_SHIFT)
> +#define NAPOT_CONT64KB_SIZE (1UL << NAPOT_CONT64KB_SHIFT)
Ditto here, BIT(NAPOT_CONT64KB_SHIFT)?
> +#define NAPOT_CONT64KB_MASK (NAPOT_CONT64KB_SIZE - 1)
GENMASK(NAPOT_CONT64KB_SHIFT - 1, 0), no? (GENMASK takes bit positions,
so the SIZE - 1 mask is bits [SHIFT-1:0].)
> +#define NAPOT_64KB_PTE_NUM (1UL << NAPOT_CONT64KB_ORDER)
BIT(NAPOT_CONT64KB_ORDER)?
> +#define NAPOT_64KB_MASK (7UL << _PAGE_PFN_SHIFT)
GENMASK() here too maybe? But not sure if it adds to readability.
> +
> /*
> * [62:61] Svpbmt Memory Type definitions:
> *
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 7ec936910a96..37547dd04010 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -264,10 +264,41 @@ static inline pte_t pud_pte(pud_t pud)
> return __pte(pud_val(pud));
> }
>
> +static inline bool has_svnapot(void)
> +{
> + u64 _val;
> +
> + ALT_SVNAPOT(_val);
> + return _val;
> +}
> +
> +#ifdef CONFIG_SVNAPOT
> +
> +static inline unsigned long pte_napot(pte_t pte)
> +{
> + return pte_val(pte) & _PAGE_NAPOT;
> +}
> +
> +static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
> +{
> + unsigned long napot_bits = (1UL << (order - 1)) << _PAGE_PFN_SHIFT;
I would probably prefer to write this as BIT(order - 1) << _PAGE_PFN_SHIFT
> + unsigned long lower_prot =
> + pte_val(pte) & ((1UL << _PAGE_PFN_SHIFT) - 1UL);
pte_val(pte) & GENMASK(_PAGE_PFN_SHIFT - 1, 0)? (The mask covers the bits
below _PAGE_PFN_SHIFT, i.e. [_PAGE_PFN_SHIFT-1:0].)
> + unsigned long upper_prot = (pte_val(pte) >> _PAGE_PFN_SHIFT)
> + << _PAGE_PFN_SHIFT;
Why are you shifting this down & then back up again? Could you not just
reuse the negated mask here so the zeroing looks more intentional?
> +
> + return __pte(upper_prot | napot_bits | lower_prot | _PAGE_NAPOT);
> +}
> +#endif /* CONFIG_SVNAPOT */
> +
> /* Yields the page frame number (PFN) of a page table entry */
> static inline unsigned long pte_pfn(pte_t pte)
> {
> - return __page_val_to_pfn(pte_val(pte));
> + unsigned long _val = pte_val(pte);
> +
> + ALT_SVNAPOT_PTE_PFN(_val, _PAGE_NAPOT_SHIFT,
> + _PAGE_PFN_MASK, _PAGE_PFN_SHIFT);
> + return _val;
> }
>
> #define pte_page(x) pfn_to_page(pte_pfn(x))
> diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
> index 0be8a2403212..d2a61122c595 100644
> --- a/arch/riscv/kernel/cpu.c
> +++ b/arch/riscv/kernel/cpu.c
> @@ -96,6 +96,7 @@ static struct riscv_isa_ext_data isa_ext_arr[] = {
> __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
> __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
> __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
> + __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
> __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
> };
>
> diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
> index 553d755483ed..8cf52f0c5f1a 100644
> --- a/arch/riscv/kernel/cpufeature.c
> +++ b/arch/riscv/kernel/cpufeature.c
> @@ -204,6 +204,7 @@ void __init riscv_fill_hwcap(void)
> SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM);
> SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE);
> SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
> + SET_ISA_EXT_MAP("svnapot", RISCV_ISA_EXT_SVNAPOT);
> }
> #undef SET_ISA_EXT_MAP
> }
> @@ -284,6 +285,20 @@ static bool __init_or_module cpufeature_probe_zicbom(unsigned int stage)
> return false;
> }
>
> +static bool __init_or_module cpufeature_probe_svnapot(unsigned int stage)
> +{
> +#ifdef CONFIG_SVNAPOT
> + switch (stage) {
> + case RISCV_ALTERNATIVES_EARLY_BOOT:
> + return false;
> + default:
> + return riscv_isa_extension_available(NULL, SVNAPOT);
> + }
> +#endif
> +
> + return false;
> +}
> +
> /*
> * Probe presence of individual extensions.
> *
> @@ -301,6 +316,9 @@ static u32 __init_or_module cpufeature_probe(unsigned int stage)
> if (cpufeature_probe_zicbom(stage))
> cpu_req_feature |= (1U << CPUFEATURE_ZICBOM);
I wonder why BIT wasn't used here either?
>
> + if (cpufeature_probe_svnapot(stage))
> + cpu_req_feature |= (1U << CPUFEATURE_SVNAPOT);
Could use it here too..
Using BIT() here looks like a no-brainer — have I missed something?
Thanks,
Conor.
> +
> return cpu_req_feature;
> }
>
> --
> 2.35.1
>
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv at lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv
More information about the linux-riscv
mailing list