[PATCH v4 13/20] arm64: reorganise PAGE_/PROT_ macros
Joey Gouly
joey.gouly at arm.com
Thu Aug 24 03:14:16 PDT 2023
Hi Ard,
On Tue, Aug 22, 2023 at 04:10:35PM +0200, Ard Biesheuvel wrote:
> On Tue, 6 Jun 2023 at 17:00, Joey Gouly <joey.gouly at arm.com> wrote:
> >
> > Make these macros available to assembly code, so they can be re-used by the
> > PIE initialisation code.
> >
> > This involves adding some extra macros, prefixed with _, that are the raw
> > values rather than `pgprot` values.
> >
> > A dummy value for PTE_MAYBE_NG is also provided, for use in assembly.
> >
> ...
> > +
> > +#ifdef __ASSEMBLY__
> > +#define PTE_MAYBE_NG 0
> > +#endif
> > +
>
> I am struggling a bit to understand why this is ok. I get that the PIE
> index macros mask off the nG bit even if it is set, but this exposes a
> definition of PROT_DEFAULT and everything based on it to asm code that
> deviates from the one observed by C code.
Yes, it's a bit of a hack to share as much as possible, and it's "ok" because,
as you said, PIE masks that bit out.
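To spell that out: pte_pi_index() (from earlier in this series, quoted roughly
from memory) only extracts the four index bits, and nG is not one of them:

/* The PIE index is built from AP[1] (bit 6), DBM (bit 51), PXN (bit 53)
 * and UXN (bit 54). nG (bit 11) is never part of the index, so defining
 * PTE_MAYBE_NG to 0 for assembly cannot change any PIE_E0/PIE_E1 entry. */
#define pte_pi_index(pte) ( \
	((pte & BIT(PTE_PI_IDX_3)) >> (PTE_PI_IDX_3 - 3)) | \
	((pte & BIT(PTE_PI_IDX_2)) >> (PTE_PI_IDX_2 - 2)) | \
	((pte & BIT(PTE_PI_IDX_1)) >> (PTE_PI_IDX_1 - 1)) | \
	((pte & BIT(PTE_PI_IDX_0)) >> PTE_PI_IDX_0))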
>
> I am running into this because I am adding PTE_MAYBE_SHARED for LPA2
> support (which repurposes the shareability bits as output address
> bits), and I could just #define it to 0x0 as well for assembly, but I
> am not sure this is the right approach.
Happy to do this differently, if there is a better approach.
I reverted this patch (fa4cdccaa582) and applied something like the following (just compile-tested):
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index c7d77333ce1e..8fceeb111ad1 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -20,6 +20,17 @@
 #define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
+#define PIE_PAGE_SHARED (PTE_USER | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PIE_PAGE_SHARED_EXEC (PTE_USER | PTE_PXN | PTE_WRITE)
+#define PIE_PAGE_READONLY (PTE_USER | PTE_PXN | PTE_UXN)
+#define PIE_PAGE_READONLY_EXEC (PTE_USER | PTE_PXN)
+#define PIE_PAGE_EXECONLY (PTE_PXN)
+
+#define PIE_PAGE_KERNEL (PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PIE_PAGE_KERNEL_RO (PTE_PXN | PTE_UXN)
+#define PIE_PAGE_KERNEL_ROX (PTE_UXN)
+#define PIE_PAGE_KERNEL_EXEC (PTE_UXN | PTE_WRITE)
+
 /*
  * This bit indicates that the entry is present i.e. pmd_page()
  * still points to a valid huge page in memory even if the pmd
@@ -83,11 +94,11 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 /* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
-#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_SHARED)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_SHARED_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_READONLY)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_READONLY_EXEC)
+#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PIE_PAGE_EXECONLY)
 #endif /* __ASSEMBLY__ */
@@ -124,21 +135,21 @@ extern bool arm64_use_ng_mappings;
 /* f: PAGE_SHARED PTE_UXN | PTE_PXN | PTE_WRITE | PTE_USER */
 #define PIE_E0 ( \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW))
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_EXECONLY), PIE_X_O) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY_EXEC), PIE_RX) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED_EXEC), PIE_RWX) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY), PIE_R) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED), PIE_RW))
 #define PIE_E1 ( \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RW) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_ROX), PIE_RX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_EXEC), PIE_RWX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_RO), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL), PIE_RW))
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_EXECONLY), PIE_NONE_O) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY_EXEC), PIE_R) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED_EXEC), PIE_RW) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_READONLY), PIE_R) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_SHARED), PIE_RW) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL_ROX), PIE_RX) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL_EXEC), PIE_RWX) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL_RO), PIE_R) | \
+ PIRx_ELx_PERM(pte_pi_index(PIE_PAGE_KERNEL), PIE_RW))
 #endif /* __ASM_PGTABLE_PROT_H */
The PAGE_KERNEL bits are harder to share, because they are based on
PROT_NORMAL, so some duplication remains there. But maybe that bit of
duplication is better than the "#define PTE_MAYBE_NG 0" hack I had. We could
also add a BUILD_BUG_ON somewhere to check that the PIE_PAGE_KERNEL* and
PAGE_KERNEL* definitions have matching bits?
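Something like the below, perhaps (an untested sketch; check_pie_page_prots()
and PIE_INDEX_BITS are made-up names, and the BUILD_BUG_ONs rely on the
optimiser folding the runtime PTE_MAYBE_NG term away, which should be fine
since PTE_NG sits outside the mask):

/* Untested sketch: compare each pgprot-based PAGE_KERNEL* value with the
 * corresponding PIE_PAGE_KERNEL* macro on the four bits that feed the PIE
 * index (USER, WRITE/DBM, PXN, UXN). PIE_INDEX_BITS is a made-up name. */
#define PIE_INDEX_BITS	(PTE_USER | PTE_WRITE | PTE_PXN | PTE_UXN)

static void __init check_pie_page_prots(void)
{
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL)      & PIE_INDEX_BITS) != PIE_PAGE_KERNEL);
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL_RO)   & PIE_INDEX_BITS) != PIE_PAGE_KERNEL_RO);
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL_ROX)  & PIE_INDEX_BITS) != PIE_PAGE_KERNEL_ROX);
	BUILD_BUG_ON((pgprot_val(PAGE_KERNEL_EXEC) & PIE_INDEX_BITS) != PIE_PAGE_KERNEL_EXEC);
}

It would need calling from somewhere early (paging_init(), say) so the
asserts are actually compiled.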
Thanks,
Joey