[RFC PATCH 27/45] KVM: arm64: smmu-v3: Setup domains and page table configuration

Mostafa Saleh smostafa at google.com
Mon Jan 15 06:34:12 PST 2024


Hi Jean,

On Wed, Feb 1, 2023 at 12:59 PM Jean-Philippe Brucker
<jean-philippe at linaro.org> wrote:
>
> Setup the stream table entries when the host issues the attach_dev() and
> detach_dev() hypercalls. The driver holds one io-pgtable configuration
> for all domains.
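
Just to check my understanding of the attach_dev() path: the STE installed
for the device ends up being a stage-2 config roughly like the sketch below,
with the VTCR/VTTBR coming from the single shared io-pgtable config, and the
update then made visible with smmu_sync_ste()? (Field macros are the ones
from the upstream arm-smmu-v3.h; this is only an illustration of the layout,
not the exact code in the patch.)

	u64 *ste = smmu_get_ste_ptr(smmu, sid);

	/* Stage-2 translation, VMID = domain_id, page table shared by all domains */
	ste[2] = FIELD_PREP(STRTAB_STE_2_S2VMID, domain_id) |
		 FIELD_PREP(STRTAB_STE_2_VTCR, vtcr) |
		 STRTAB_STE_2_S2R;
	ste[3] = vttbr & STRTAB_STE_3_S2TTB_MASK;
	/* Make the STE valid last */
	ste[0] = STRTAB_STE_0_V |
		 FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
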
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
> ---
>  include/kvm/arm_smmu_v3.h                   |   2 +
>  arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c | 178 +++++++++++++++++++-
>  2 files changed, 177 insertions(+), 3 deletions(-)
>
> diff --git a/include/kvm/arm_smmu_v3.h b/include/kvm/arm_smmu_v3.h
> index fc67a3bf5709..ed139b0e9612 100644
> --- a/include/kvm/arm_smmu_v3.h
> +++ b/include/kvm/arm_smmu_v3.h
> @@ -3,6 +3,7 @@
>  #define __KVM_ARM_SMMU_V3_H
>
>  #include <asm/kvm_asm.h>
> +#include <linux/io-pgtable-arm.h>
>  #include <kvm/iommu.h>
>
>  #if IS_ENABLED(CONFIG_ARM_SMMU_V3_PKVM)
> @@ -28,6 +29,7 @@ struct hyp_arm_smmu_v3_device {
>         size_t                  strtab_num_entries;
>         size_t                  strtab_num_l1_entries;
>         u8                      strtab_split;
> +       struct arm_lpae_io_pgtable pgtable;
>  };
>
>  extern size_t kvm_nvhe_sym(kvm_hyp_arm_smmu_v3_count);
> diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> index 81040339ccfe..56e313203a16 100644
> --- a/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> +++ b/arch/arm64/kvm/hyp/nvhe/iommu/arm-smmu-v3.c
> @@ -152,7 +152,6 @@ static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
>         return smmu_sync_cmd(smmu);
>  }
>
> -__maybe_unused
>  static int smmu_sync_ste(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
>  {
>         struct arm_smmu_cmdq_ent cmd = {
> @@ -194,7 +193,6 @@ static int smmu_alloc_l2_strtab(struct hyp_arm_smmu_v3_device *smmu, u32 idx)
>         return 0;
>  }
>
> -__maybe_unused
>  static u64 *smmu_get_ste_ptr(struct hyp_arm_smmu_v3_device *smmu, u32 sid)
>  {
>         u32 idx;
> @@ -382,6 +380,68 @@ static int smmu_reset_device(struct hyp_arm_smmu_v3_device *smmu)
>         return smmu_write_cr0(smmu, 0);
>  }
>
> +static struct hyp_arm_smmu_v3_device *to_smmu(struct kvm_hyp_iommu *iommu)
> +{
> +       return container_of(iommu, struct hyp_arm_smmu_v3_device, iommu);
> +}
> +
> +static void smmu_tlb_flush_all(void *cookie)
> +{
> +       struct kvm_iommu_tlb_cookie *data = cookie;
> +       struct hyp_arm_smmu_v3_device *smmu = to_smmu(data->iommu);
> +       struct arm_smmu_cmdq_ent cmd = {
> +               .opcode = CMDQ_OP_TLBI_S12_VMALL,
> +               .tlbi.vmid = data->domain_id,
> +       };
> +
> +       WARN_ON(smmu_send_cmd(smmu, &cmd));
> +}
> +
> +static void smmu_tlb_inv_range(struct kvm_iommu_tlb_cookie *data,
> +                              unsigned long iova, size_t size, size_t granule,
> +                              bool leaf)
> +{
> +       struct hyp_arm_smmu_v3_device *smmu = to_smmu(data->iommu);
> +       unsigned long end = iova + size;
> +       struct arm_smmu_cmdq_ent cmd = {
> +               .opcode = CMDQ_OP_TLBI_S2_IPA,
> +               .tlbi.vmid = data->domain_id,
> +               .tlbi.leaf = leaf,
> +       };
> +
> +       /*
> +        * There are no mappings at high addresses since we don't use TTB1, so
> +        * no overflow possible.
> +        */
> +       BUG_ON(end < iova);
> +
> +       while (iova < end) {
> +               cmd.tlbi.addr = iova;
> +               WARN_ON(smmu_send_cmd(smmu, &cmd));

This issues a CMD_SYNC after every TLBI command in the loop, which isn't
needed. Maybe we could queue all the TLBI commands first and only issue one
sync at the end, similar to what the upstream driver does. What do you think?
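
Something like this, as a rough sketch (assuming smmu_add_cmd() can be used
on its own here, i.e. the queuing half of smmu_send_cmd()):

	/* Queue one TLBI per granule... */
	while (iova < end) {
		cmd.tlbi.addr = iova;
		WARN_ON(smmu_add_cmd(smmu, &cmd));
		iova += granule;
	}
	/* ...and wait for completion only once */
	WARN_ON(smmu_sync_cmd(smmu));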

Thanks,
Mostafa


