[PATCH] arm64: hugetlb: Restore TLB invalidation for BBM on contiguous ptes
Anshuman Khandual
anshuman.khandual at arm.com
Thu Jun 30 00:02:02 PDT 2022
On 6/29/22 15:23, Will Deacon wrote:
> Commit fb396bb459c1 ("arm64/hugetlb: Drop TLB flush from get_clear_flush()")
> removed TLB invalidation from get_clear_flush() [now get_clear_contig()]
> on the basis that the core TLB invalidation code is aware of hugetlb
> mappings backed by contiguous page-table entries and will cover the
> correct virtual address range.
>
> However, this change also resulted in the TLB invalidation being removed
> from the "break" step in the break-before-make (BBM) sequence used
> internally by huge_ptep_set_{access_flags,wrprotect}(), therefore
> making the BBM sequence unsafe irrespective of later invalidation.
>
> Although the architecture is desperately unclear about how exactly
> contiguous ptes should be updated in a live page-table, restore TLB
> invalidation to our BBM sequence under the assumption that BBM is the
> right thing to be doing in the first place.
>
> Cc: Ard Biesheuvel <ardb at kernel.org>
> Cc: Steve Capper <steve.capper at arm.com>
> Cc: Anshuman Khandual <anshuman.khandual at arm.com>
> Cc: Mike Kravetz <mike.kravetz at oracle.com>
> Cc: Catalin Marinas <catalin.marinas at arm.com>
> Cc: Marc Zyngier <maz at kernel.org>
> Signed-off-by: Will Deacon <will at kernel.org>
> ---
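For anyone following along, a minimal stand-alone sketch of the ordering
this patch restores is below. All identifiers (clear_entry(),
install_entry(), invalidate_range(), update_contig()) are hypothetical
stand-ins rather than the kernel API; they only model the
break -> invalidate -> make sequence that
huge_ptep_set_{access_flags,wrprotect}() now follow via the new
get_clear_contig_flush() helper.

#include <stdio.h>

#define NCONTIG	16		/* e.g. contiguous ptes per block */
#define PGSIZE	0x1000UL	/* range covered by each entry */

static unsigned long table[NCONTIG];	/* models the contiguous pte set */

static void clear_entry(int i)			{ table[i] = 0; }
static void install_entry(int i, unsigned long v)	{ table[i] = v; }

static void invalidate_range(unsigned long addr, unsigned long end)
{
	/* models flush_tlb_range(): stale translations for [addr, end) gone */
	printf("invalidate [%#lx, %#lx)\n", addr, end);
}

/* BBM: break (clear) every entry, invalidate, only then make (re-install) */
static void update_contig(unsigned long addr, unsigned long newval)
{
	int i;

	for (i = 0; i < NCONTIG; i++)
		clear_entry(i);					/* break */
	invalidate_range(addr, addr + PGSIZE * NCONTIG);	/* invalidate */
	for (i = 0; i < NCONTIG; i++)
		install_entry(i, newval + i * PGSIZE);		/* make */
}

int main(void)
{
	update_contig(0x200000UL, 0x40000000UL);
	printf("entry 0 now %#lx\n", table[0]);
	return 0;
}

The point is just that the ranged invalidation sits between the clear and
the re-install, which is what get_clear_contig_flush() guarantees for its
callers.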
There is a checkpatch warning for the commit message:
WARNING: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#7:
Commit fb396bb459c1 ("arm64/hugetlb: Drop TLB flush from get_clear_flush()")
total: 0 errors, 1 warnings, 77 lines checked
NOTE: For some of the reported defects, checkpatch may be able to
mechanically convert to the typical style using --fix or --fix-inplace.
Otherwise LGTM.
Reviewed-by: Anshuman Khandual <anshuman.khandual at arm.com>
>
> Found by inspection.
>
> arch/arm64/mm/hugetlbpage.c | 30 +++++++++++++++++++++---------
> 1 file changed, 21 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
> index e2a5ec9fdc0d..3618ef3f6d81 100644
> --- a/arch/arm64/mm/hugetlbpage.c
> +++ b/arch/arm64/mm/hugetlbpage.c
> @@ -214,6 +214,19 @@ static pte_t get_clear_contig(struct mm_struct *mm,
> return orig_pte;
> }
>
> +static pte_t get_clear_contig_flush(struct mm_struct *mm,
> + unsigned long addr,
> + pte_t *ptep,
> + unsigned long pgsize,
> + unsigned long ncontig)
> +{
> + pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
> + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
> +
> + flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
> + return orig_pte;
> +}
> +
> /*
> * Changing some bits of contiguous entries requires us to follow a
> * Break-Before-Make approach, breaking the whole contiguous set
> @@ -447,19 +460,20 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
> int ncontig, i;
> size_t pgsize = 0;
> unsigned long pfn = pte_pfn(pte), dpfn;
> + struct mm_struct *mm = vma->vm_mm;
> pgprot_t hugeprot;
> pte_t orig_pte;
>
> if (!pte_cont(pte))
> return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
>
> - ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
> + ncontig = find_num_contig(mm, addr, ptep, &pgsize);
> dpfn = pgsize >> PAGE_SHIFT;
>
> if (!__cont_access_flags_changed(ptep, pte, ncontig))
> return 0;
>
> - orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
> + orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
>
> /* Make sure we don't lose the dirty or young state */
> if (pte_dirty(orig_pte))
> @@ -470,7 +484,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
>
> hugeprot = pte_pgprot(pte);
> for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
> - set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
> + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
>
> return 1;
> }
> @@ -492,7 +506,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
> ncontig = find_num_contig(mm, addr, ptep, &pgsize);
> dpfn = pgsize >> PAGE_SHIFT;
>
> - pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
> + pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
> pte = pte_wrprotect(pte);
>
> hugeprot = pte_pgprot(pte);
> @@ -505,17 +519,15 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
> pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
> unsigned long addr, pte_t *ptep)
> {
> + struct mm_struct *mm = vma->vm_mm;
> size_t pgsize;
> int ncontig;
> - pte_t orig_pte;
>
> if (!pte_cont(READ_ONCE(*ptep)))
> return ptep_clear_flush(vma, addr, ptep);
>
> - ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
> - orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
> - flush_tlb_range(vma, addr, addr + pgsize * ncontig);
> - return orig_pte;
> + ncontig = find_num_contig(mm, addr, ptep, &pgsize);
> + return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
> }
>
> static int __init hugetlbpage_init(void)