[PATCH v3 03/18] KVM: arm64: Make hyp_page::order a u8
Fuad Tabba
tabba at google.com
Tue Dec 17 00:43:45 PST 2024
On Mon, 16 Dec 2024 at 17:58, Quentin Perret <qperret at google.com> wrote:
>
> We don't need 16 bits to store the hyp page order, and we'll need some
> bits to store page ownership data soon, so let's reduce the order
> member.
>
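FWIW, I put together a quick userspace sketch (not part of the patch;
field names copied from the diff, fixed-width types via stdint) to
sanity-check the new layout: the vmemmap entry stays at 4 bytes, the
spare byte is free for the upcoming ownership data, and HYP_NO_ORDER ==
0xff still fits in the narrower order field:

  /* Host-side layout check only -- mirrors the struct from the patch. */
  #include <assert.h>
  #include <stdint.h>

  struct hyp_page {
          uint16_t refcount;
          uint8_t order;
          uint8_t reserved;       /* spare byte for future ownership data */
  };

  /* Same footprint as the old two-u16 layout. */
  static_assert(sizeof(struct hyp_page) == 4,
                "vmemmap entry should stay at 4 bytes");
  /* The new HYP_NO_ORDER sentinel is representable in the u8 field. */
  static_assert(0xff <= UINT8_MAX,
                "HYP_NO_ORDER must fit in hyp_page::order");

  int main(void)
  {
          return 0;
  }

And since real orders are bounded by NR_PAGE_ORDERS, a u8 leaves plenty
of headroom.
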
> Signed-off-by: Quentin Perret <qperret at google.com>
Reviewed-by: Fuad Tabba <tabba at google.com>
Cheers,
/fuad
> ---
> arch/arm64/kvm/hyp/include/nvhe/gfp.h | 6 +++---
> arch/arm64/kvm/hyp/include/nvhe/memory.h | 5 +++--
> arch/arm64/kvm/hyp/nvhe/page_alloc.c | 14 +++++++-------
> 3 files changed, 13 insertions(+), 12 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> index 97c527ef53c2..f1725bad6331 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> @@ -7,7 +7,7 @@
> #include <nvhe/memory.h>
> #include <nvhe/spinlock.h>
>
> -#define HYP_NO_ORDER USHRT_MAX
> +#define HYP_NO_ORDER 0xff
>
> struct hyp_pool {
> /*
> @@ -19,11 +19,11 @@ struct hyp_pool {
> struct list_head free_area[NR_PAGE_ORDERS];
> phys_addr_t range_start;
> phys_addr_t range_end;
> - unsigned short max_order;
> + u8 max_order;
> };
>
> /* Allocation */
> -void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
> +void *hyp_alloc_pages(struct hyp_pool *pool, u8 order);
> void hyp_split_page(struct hyp_page *page);
> void hyp_get_page(struct hyp_pool *pool, void *addr);
> void hyp_put_page(struct hyp_pool *pool, void *addr);
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> index c84b24234ac7..45b8d1840aa4 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> @@ -41,8 +41,9 @@ static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
> }
>
> struct hyp_page {
> - unsigned short refcount;
> - unsigned short order;
> + u16 refcount;
> + u8 order;
> + u8 reserved;
> };
>
> extern u64 __hyp_vmemmap;
> diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> index e691290d3765..a1eb27a1a747 100644
> --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> @@ -32,7 +32,7 @@ u64 __hyp_vmemmap;
> */
> static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
> struct hyp_page *p,
> - unsigned short order)
> + u8 order)
> {
> phys_addr_t addr = hyp_page_to_phys(p);
>
> @@ -51,7 +51,7 @@ static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
> /* Find a buddy page currently available for allocation */
> static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
> struct hyp_page *p,
> - unsigned short order)
> + u8 order)
> {
> struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
>
> @@ -94,7 +94,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
> struct hyp_page *p)
> {
> phys_addr_t phys = hyp_page_to_phys(p);
> - unsigned short order = p->order;
> + u8 order = p->order;
> struct hyp_page *buddy;
>
> memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
> @@ -129,7 +129,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
>
> static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
> struct hyp_page *p,
> - unsigned short order)
> + u8 order)
> {
> struct hyp_page *buddy;
>
> @@ -183,7 +183,7 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
>
> void hyp_split_page(struct hyp_page *p)
> {
> - unsigned short order = p->order;
> + u8 order = p->order;
> unsigned int i;
>
> p->order = 0;
> @@ -195,10 +195,10 @@ void hyp_split_page(struct hyp_page *p)
> }
> }
>
> -void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
> +void *hyp_alloc_pages(struct hyp_pool *pool, u8 order)
> {
> - unsigned short i = order;
> struct hyp_page *p;
> + u8 i = order;
>
> hyp_spin_lock(&pool->lock);
>
> --
> 2.47.1.613.gc27f4b7a9f-goog
>