[PATCH 04/14] KVM: arm64: Mediate host access to GIC/ITS MMIO via unmapping

Fuad Tabba tabba at google.com
Fri Mar 13 02:58:04 PDT 2026


Hi Sebastian,

On Tue, 10 Mar 2026 at 12:49, Sebastian Ene <sebastianene at google.com> wrote:
>
> Unmap the ITS MMIO region from the host address space to enforce
> hypervisor mediation.
> Identify the ITS base address from the device tree and store it in a
> protected region. A callback is registered to handle host accesses to
> this region; currently, the handler simply forwards all MMIO requests
> to the physical hardware. This provides the infrastructure for future
> hardware state validation without changing current behavior.
>
> Signed-off-by: Sebastian Ene <sebastianene at google.com>
> ---
>  arch/arm64/include/asm/kvm_pkvm.h     |  2 ++
>  arch/arm64/kvm/hyp/nvhe/Makefile      |  3 ++-
>  arch/arm64/kvm/hyp/nvhe/its_emulate.c | 23 ++++++++++++++++
>  arch/arm64/kvm/pkvm.c                 | 38 +++++++++++++++++++++++++++
>  4 files changed, 65 insertions(+), 1 deletion(-)
>  create mode 100644 arch/arm64/kvm/hyp/nvhe/its_emulate.c
>
> diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
> index 5321ced2f50a..ef00c1bf7d00 100644
> --- a/arch/arm64/include/asm/kvm_pkvm.h
> +++ b/arch/arm64/include/asm/kvm_pkvm.h
> @@ -32,6 +32,8 @@ struct pkvm_protected_reg {
>
>  extern struct pkvm_protected_reg kvm_nvhe_sym(pkvm_protected_regs)[];
>  extern unsigned int kvm_nvhe_sym(num_protected_reg);
> +extern void kvm_nvhe_sym(pkvm_handle_forward_req)(struct pkvm_protected_reg *region, u64 offset,
> +                                                 bool write, u64 *reg, u8 reg_size);
>
>  int pkvm_init_host_vm(struct kvm *kvm);
>  int pkvm_create_hyp_vm(struct kvm *kvm);
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index a244ec25f8c5..eb43269fbac2 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -24,7 +24,8 @@ CFLAGS_switch.nvhe.o += -Wno-override-init
>
>  hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
>          hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
> -        cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
> +        cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o \
> +        its_emulate.o
>  hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
>          ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
>  hyp-obj-y += ../../../kernel/smccc-call.o
> diff --git a/arch/arm64/kvm/hyp/nvhe/its_emulate.c b/arch/arm64/kvm/hyp/nvhe/its_emulate.c
> new file mode 100644
> index 000000000000..0eecbb011898
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/nvhe/its_emulate.c
> @@ -0,0 +1,23 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +
> +#include <asm/kvm_pkvm.h>
> +#include <nvhe/mem_protect.h>
> +
> +
> +void pkvm_handle_forward_req(struct pkvm_protected_reg *region, u64 offset, bool write,
> +                            u64 *reg, u8 reg_size)
> +{
> +       void __iomem *addr = __hyp_va((region->start_pfn << PAGE_SHIFT) + offset);

Better use the macros to do this:
+       void __iomem *addr = __hyp_va(PFN_PHYS(region->start_pfn) + offset);

Also, considering all the potential pitfalls of __hyp_va(), it might
be better to store the virtual address in pkvm_protected_regs (a void
__iomem *base_va field), assigned when the MMIO region is successfully
mapped into EL2, rather than recalculating it on the fly.

> +
> +       if (reg_size == sizeof(u32)) {
> +               if (!write)
> +                       *reg = readl_relaxed(addr);
> +               else
> +                       writel_relaxed(*reg, addr);
> +       } else if (reg_size == sizeof(u64)) {
> +               if (!write)
> +                       *reg = readq_relaxed(addr);
> +               else
> +                       writeq_relaxed(*reg, addr);
> +       }

The spec permits 8-bit, 16-bit, 32-bit, and 64-bit accesses depending
on the specific register, but this only checks for sizeof(u32) and
sizeof(u64); any other size falls through silently. You should either
handle the smaller sizes as well or, if you intentionally want to
block 8-bit/16-bit accesses, explicitly inject a synchronous abort
back to the host so the access does not fail silently.

Also, I think that using `readl_relaxed` and `writel_relaxed` is
technically correct for pure passthrough, on the assumption that the
host driver already issued the necessary barriers before the access
trapped. However, I am always a bit wary around memory barriers, and
I'd rather we check with someone (e.g. Will Deacon) to confirm that
this is ok.

> +}
> diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
> index d7a0f69a9982..a766be6de735 100644
> --- a/arch/arm64/kvm/pkvm.c
> +++ b/arch/arm64/kvm/pkvm.c
> @@ -11,6 +11,9 @@
>  #include <asm/kvm_mmu.h>
>  #include <linux/memblock.h>
>  #include <linux/mutex.h>
> +#include <linux/of_address.h>
> +#include <linux/of_reserved_mem.h>
> +#include <linux/platform_device.h>
>
>  #include <asm/kvm_pkvm.h>
>
> @@ -18,6 +21,7 @@
>
>  DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
>
> +static struct pkvm_protected_reg *pkvm_protected_regs = kvm_nvhe_sym(pkvm_protected_regs);
>  static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
>  static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
>
> @@ -39,6 +43,34 @@ static int __init register_memblock_regions(void)
>         return 0;
>  }
>
> +static int __init register_protected_regions(void)
> +{
> +       int i = 0, ret;
> +       struct device_node *np;
> +       struct resource res;

Prefer reverse Christmas tree declaration order.

> +
> +       for_each_compatible_node(np, NULL, "arm,gic-v3-its") {
> +               ret = of_address_to_resource(np, i, &res);
> +               if (ret)
> +                       return ret;
> +
> +               if (i >= PKVM_PROTECTED_REGS_NUM)
> +                       return -ENOMEM;
> +
> +               if (!PAGE_ALIGNED(res.start) || !PAGE_ALIGNED(resource_size(&res)))
> +                       return -EINVAL;
> +
> +               pkvm_protected_regs[i].start_pfn = res.start >> PAGE_SHIFT;

and this:
... PHYS_PFN(res.start);


> +               pkvm_protected_regs[i].num_pages = resource_size(&res) >> PAGE_SHIFT;

and this:
... PFN_DOWN(resource_size(&res));

Cheers,
/fuad

> +               pkvm_protected_regs[i].cb = lm_alias(&kvm_nvhe_sym(pkvm_handle_forward_req));
> +               i++;
> +       }
> +
> +       kvm_nvhe_sym(num_protected_reg) = i;
> +
> +       return 0;
> +}
> +
>  void __init kvm_hyp_reserve(void)
>  {
>         u64 hyp_mem_pages = 0;
> @@ -57,6 +89,12 @@ void __init kvm_hyp_reserve(void)
>                 return;
>         }
>
> +       ret = register_protected_regions();
> +       if (ret) {
> +               kvm_err("Failed to register protected reg: %d\n", ret);
> +               return;
> +       }
> +
>         hyp_mem_pages += hyp_s1_pgtable_pages();
>         hyp_mem_pages += host_s2_pgtable_pages();
>         hyp_mem_pages += hyp_vm_table_pages();
> --
> 2.53.0.473.g4a7958ca14-goog
>



More information about the linux-arm-kernel mailing list