[PATCH] kmap_local: don't assume kmap PTEs are linear arrays in memory
Ard Biesheuvel
ardb at kernel.org
Fri Nov 5 07:21:00 PDT 2021
On Tue, 26 Oct 2021 at 15:13, Ard Biesheuvel <ardb at kernel.org> wrote:
>
> The kmap_local conversion broke the ARM architecture, because the new
> code assumes that all PTEs used for creating kmaps form a linear array
> in memory, and uses array indexing to look up the kmap PTE belonging to
> a certain kmap index.
>
> On ARM, this cannot work, not only because the PTE pages may be
> non-adjacent in memory, but also because ARM/!LPAE interleaves hardware
> entries and extended entries (carrying software-only bits) in a way that
> is not compatible with array indexing.
>
> Fortunately, this only seems to affect configurations with more than 8
> CPUs, due to the way the per-CPU kmap slots are organized in memory.
>
> Work around this by permitting an architecture to set a Kconfig symbol
> that signifies that the kmap PTEs do not form a linear array in memory,
> and so the only way to locate the appropriate one is to walk the page
> tables.
>
> Reported-by: Quanyang Wang <quanyang.wang at windriver.com>
> Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
> ---
Ping? Can we get this fixed please?
> arch/arm/Kconfig | 1 +
> mm/Kconfig | 3 +++
> mm/highmem.c | 32 +++++++++++++++++++++-----------
> 3 files changed, 25 insertions(+), 11 deletions(-)
>
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index 727c00c7d616..9aa0528f85de 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -1467,6 +1467,7 @@ config HIGHMEM
> bool "High Memory Support"
> depends on MMU
> select KMAP_LOCAL
> + select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
> help
> The address space of ARM processors is only 4 Gigabytes large
> and it has to accommodate user address space, kernel address
> diff --git a/mm/Kconfig b/mm/Kconfig
> index d16ba9249bc5..c048dea7e342 100644
> --- a/mm/Kconfig
> +++ b/mm/Kconfig
> @@ -887,6 +887,9 @@ config MAPPING_DIRTY_HELPERS
> config KMAP_LOCAL
> bool
>
> +config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
> + bool
> +
> # struct io_mapping based helper. Selected by drivers that need them
> config IO_MAPPING
> bool
> diff --git a/mm/highmem.c b/mm/highmem.c
> index 4212ad0e4a19..1f0c8a52fd80 100644
> --- a/mm/highmem.c
> +++ b/mm/highmem.c
> @@ -504,16 +504,22 @@ static inline int kmap_local_calc_idx(int idx)
>
> static pte_t *__kmap_pte;
>
> -static pte_t *kmap_get_pte(void)
> +static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
> {
> + if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
> + /*
> + * Set by the arch if __kmap_pte[-idx] does not produce
> + * the correct entry.
> + */
> + return virt_to_kpte(vaddr);
> if (!__kmap_pte)
> __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
> - return __kmap_pte;
> + return &__kmap_pte[-idx];
> }
>
> void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
> {
> - pte_t pteval, *kmap_pte = kmap_get_pte();
> + pte_t pteval, *kmap_pte;
> unsigned long vaddr;
> int idx;
>
> @@ -525,9 +531,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
> preempt_disable();
> idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
> vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
> - BUG_ON(!pte_none(*(kmap_pte - idx)));
> + kmap_pte = kmap_get_pte(vaddr, idx);
> + BUG_ON(!pte_none(*kmap_pte));
> pteval = pfn_pte(pfn, prot);
> - arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
> + arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
> arch_kmap_local_post_map(vaddr, pteval);
> current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
> preempt_enable();
> @@ -560,7 +567,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
> void kunmap_local_indexed(void *vaddr)
> {
> unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
> - pte_t *kmap_pte = kmap_get_pte();
> + pte_t *kmap_pte;
> int idx;
>
> if (addr < __fix_to_virt(FIX_KMAP_END) ||
> @@ -585,8 +592,9 @@ void kunmap_local_indexed(void *vaddr)
> idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
> WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
>
> + kmap_pte = kmap_get_pte(addr, idx);
> arch_kmap_local_pre_unmap(addr);
> - pte_clear(&init_mm, addr, kmap_pte - idx);
> + pte_clear(&init_mm, addr, kmap_pte);
> arch_kmap_local_post_unmap(addr);
> current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
> kmap_local_idx_pop();
> @@ -608,7 +616,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
> void __kmap_local_sched_out(void)
> {
> struct task_struct *tsk = current;
> - pte_t *kmap_pte = kmap_get_pte();
> + pte_t *kmap_pte;
> int i;
>
> /* Clear kmaps */
> @@ -635,8 +643,9 @@ void __kmap_local_sched_out(void)
> idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
>
> addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
> + kmap_pte = kmap_get_pte(addr, idx);
> arch_kmap_local_pre_unmap(addr);
> - pte_clear(&init_mm, addr, kmap_pte - idx);
> + pte_clear(&init_mm, addr, kmap_pte);
> arch_kmap_local_post_unmap(addr);
> }
> }
> @@ -644,7 +653,7 @@ void __kmap_local_sched_out(void)
> void __kmap_local_sched_in(void)
> {
> struct task_struct *tsk = current;
> - pte_t *kmap_pte = kmap_get_pte();
> + pte_t *kmap_pte;
> int i;
>
> /* Restore kmaps */
> @@ -664,7 +673,8 @@ void __kmap_local_sched_in(void)
> /* See comment in __kmap_local_sched_out() */
> idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
> addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
> - set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
> + kmap_pte = kmap_get_pte(addr, idx);
> + set_pte_at(&init_mm, addr, kmap_pte, pteval);
> arch_kmap_local_post_map(addr, pteval);
> }
> }
> --
> 2.30.2
>
More information about the linux-arm-kernel
mailing list