[PATCH v3 07/21] KVM: arm64: Convert kvm_phys_addr_ioremap() to generic page-table API
Alexandru Elisei
alexandru.elisei at arm.com
Tue Sep 1 13:08:01 EDT 2020
Hi Will,
The patch looks correct to me. I also had another look at the pre-order visitor
for kvm_pgtable_stage2_map(), and it will not try to map the address range with
a block mapping: the loop below passes size == PAGE_SIZE, so
kvm_block_mapping_supported() returns false at every block level.
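For reference, this is roughly the helper from earlier in the series
(paraphrased from memory, so treat it as a sketch rather than the exact code):

	static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys,
						u32 level)
	{
		u64 granule = kvm_granule_size(level);

		/* No block mappings at level 0 (nor at level 1 with 16K/64K pages). */
		if (level == 0 || (PAGE_SIZE != SZ_4K && level == 1))
			return false;

		/*
		 * kvm_phys_addr_ioremap() passes size == PAGE_SIZE below, so
		 * end - addr can never cover a block granule and we bail out
		 * here for every block level.
		 */
		if (granule > (end - addr))
			return false;

		if (!IS_ALIGNED(phys, granule))
			return false;

		return IS_ALIGNED(addr, granule);
	}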
One nitpick below.
On 8/25/20 10:39 AM, Will Deacon wrote:
> Convert kvm_phys_addr_ioremap() to use kvm_pgtable_stage2_map() instead
> of stage2_set_pte().
>
> Cc: Marc Zyngier <maz at kernel.org>
> Cc: Quentin Perret <qperret at google.com>
> Signed-off-by: Will Deacon <will at kernel.org>
> ---
> arch/arm64/kvm/hyp/pgtable.c | 14 +-------------
> arch/arm64/kvm/mmu.c | 29 ++++++++++++-----------------
> 2 files changed, 13 insertions(+), 30 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index 41ee8f3c0369..6f65d3841ec9 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -439,18 +439,6 @@ struct stage2_map_data {
> struct kvm_mmu_memory_cache *memcache;
> };
>
> -static kvm_pte_t *stage2_memcache_alloc_page(struct stage2_map_data *data)
> -{
> - kvm_pte_t *ptep = NULL;
> - struct kvm_mmu_memory_cache *mc = data->memcache;
> -
> - /* Allocated with GFP_PGTABLE_USER, so no need to zero */
> - if (mc && mc->nobjs)
> - ptep = mc->objects[--mc->nobjs];
> -
> - return ptep;
> -}
> -
> static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
> struct stage2_map_data *data)
> {
> @@ -531,7 +519,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
> if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
> return -EINVAL;
>
> - childp = stage2_memcache_alloc_page(data);
> + childp = kvm_mmu_memory_cache_alloc(data->memcache);
I think this hunk and the one above could have been squashed into the previous
patch; we could have used kvm_mmu_memory_cache_alloc() directly from the start.
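For anyone reading along, the generic helper pops from the cache much like the
removed wrapper did (paraphrased from virt/kvm/kvm_main.c, from memory, so the
details may differ):

	void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
	{
		void *p;

		if (WARN_ON(!mc->nobjs))
			/* Shouldn't happen if the cache was topped up beforehand. */
			p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
		else
			p = mc->objects[--mc->nobjs];
		BUG_ON(!p);
		return p;
	}

The one behavioural difference I can see is that it doesn't tolerate a NULL
memcache, but as far as I can tell the stage 2 map walker only allocates after
the caller has topped up a cache, so that looks fine here.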
Thanks,
Alex
> if (!childp)
> return -ENOMEM;
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 4607e9ca60a2..33146d3dc93a 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1154,35 +1154,30 @@ static int stage2_pudp_test_and_clear_young(pud_t *pud)
> int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
> phys_addr_t pa, unsigned long size, bool writable)
> {
> - phys_addr_t addr, end;
> + phys_addr_t addr;
> int ret = 0;
> - unsigned long pfn;
> struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
> + struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
> + enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
> + KVM_PGTABLE_PROT_R |
> + (writable ? KVM_PGTABLE_PROT_W : 0);
>
> - end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
> - pfn = __phys_to_pfn(pa);
> -
> - for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
> - pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
> -
> - if (writable)
> - pte = kvm_s2pte_mkwrite(pte);
> -
> + for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
> ret = kvm_mmu_topup_memory_cache(&cache,
> kvm_mmu_cache_min_pages(kvm));
> if (ret)
> - goto out;
> + break;
> +
> spin_lock(&kvm->mmu_lock);
> - ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte,
> - KVM_S2PTE_FLAG_IS_IOMAP);
> + ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
> + &cache);
> spin_unlock(&kvm->mmu_lock);
> if (ret)
> - goto out;
> + break;
>
> - pfn++;
> + pa += PAGE_SIZE;
> }
>
> -out:
> kvm_mmu_free_memory_cache(&cache);
> return ret;
> }