[PATCH v3 1/3] kasan: use separate (un)poison implementation for integrated init

Andrey Konovalov andreyknvl at gmail.com
Tue May 25 15:00:31 PDT 2021


On Wed, May 12, 2021 at 11:09 PM Peter Collingbourne <pcc at google.com> wrote:
>
> Currently with integrated init page_alloc.c needs to know whether
> kasan_alloc_pages() will zero initialize memory, but this will start
> becoming more complicated once we start adding tag initialization
> support for user pages. To avoid page_alloc.c needing to know more
> details of what integrated init will do, move the unpoisoning logic
> for integrated init into the HW tags implementation. Currently the
> logic is identical but it will diverge in subsequent patches.
>
> For symmetry do the same for poisoning although this logic will
> be unaffected by subsequent patches.
>
> Signed-off-by: Peter Collingbourne <pcc at google.com>
> Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056
> ---
> v3:
> - use BUILD_BUG()
>
> v2:
> - fix build with KASAN disabled
>
>  include/linux/kasan.h | 66 +++++++++++++++++++++++++++----------------
>  mm/kasan/common.c     |  4 +--
>  mm/kasan/hw_tags.c    | 14 +++++++++
>  mm/mempool.c          |  6 ++--
>  mm/page_alloc.c       | 56 +++++++++++++++++++-----------------
>  5 files changed, 91 insertions(+), 55 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index b1678a61e6a7..38061673e6ac 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -2,6 +2,7 @@
>  #ifndef _LINUX_KASAN_H
>  #define _LINUX_KASAN_H
>
> +#include <linux/bug.h>
>  #include <linux/static_key.h>
>  #include <linux/types.h>
>
> @@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
>
>  #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
>
> -#ifdef CONFIG_KASAN
> -
> -struct kasan_cache {
> -       int alloc_meta_offset;
> -       int free_meta_offset;
> -       bool is_kmalloc;
> -};
> -
>  #ifdef CONFIG_KASAN_HW_TAGS
>
>  DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
> @@ -101,11 +94,18 @@ static inline bool kasan_has_integrated_init(void)
>         return kasan_enabled();
>  }
>
> +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
> +void kasan_free_pages(struct page *page, unsigned int order);
> +
>  #else /* CONFIG_KASAN_HW_TAGS */
>
>  static inline bool kasan_enabled(void)
>  {
> +#ifdef CONFIG_KASAN
>         return true;
> +#else
> +       return false;
> +#endif
>  }
>
>  static inline bool kasan_has_integrated_init(void)
> @@ -113,8 +113,30 @@ static inline bool kasan_has_integrated_init(void)
>         return false;
>  }
>
> +static __always_inline void kasan_alloc_pages(struct page *page,
> +                                             unsigned int order, gfp_t flags)
> +{
> +       /* Only available for integrated init. */
> +       BUILD_BUG();
> +}
> +
> +static __always_inline void kasan_free_pages(struct page *page,
> +                                            unsigned int order)
> +{
> +       /* Only available for integrated init. */
> +       BUILD_BUG();
> +}
> +
>  #endif /* CONFIG_KASAN_HW_TAGS */
>
> +#ifdef CONFIG_KASAN
> +
> +struct kasan_cache {
> +       int alloc_meta_offset;
> +       int free_meta_offset;
> +       bool is_kmalloc;
> +};
> +
>  slab_flags_t __kasan_never_merge(void);
>  static __always_inline slab_flags_t kasan_never_merge(void)
>  {
> @@ -130,20 +152,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
>                 __kasan_unpoison_range(addr, size);
>  }
>
> -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
> -static __always_inline void kasan_alloc_pages(struct page *page,
> +void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
> +static __always_inline void kasan_poison_pages(struct page *page,
>                                                 unsigned int order, bool init)
>  {
>         if (kasan_enabled())
> -               __kasan_alloc_pages(page, order, init);
> +               __kasan_poison_pages(page, order, init);
>  }
>
> -void __kasan_free_pages(struct page *page, unsigned int order, bool init);
> -static __always_inline void kasan_free_pages(struct page *page,
> -                                               unsigned int order, bool init)
> +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
> +static __always_inline void kasan_unpoison_pages(struct page *page,
> +                                                unsigned int order, bool init)
>  {
>         if (kasan_enabled())
> -               __kasan_free_pages(page, order, init);
> +               __kasan_unpoison_pages(page, order, init);
>  }
>
>  void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> @@ -285,21 +307,15 @@ void kasan_restore_multi_shot(bool enabled);
>
>  #else /* CONFIG_KASAN */
>
> -static inline bool kasan_enabled(void)
> -{
> -       return false;
> -}
> -static inline bool kasan_has_integrated_init(void)
> -{
> -       return false;
> -}
>  static inline slab_flags_t kasan_never_merge(void)
>  {
>         return 0;
>  }
>  static inline void kasan_unpoison_range(const void *address, size_t size) {}
> -static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
> -static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
> +static inline void kasan_poison_pages(struct page *page, unsigned int order,
> +                                     bool init) {}
> +static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
> +                                       bool init) {}
>  static inline void kasan_cache_create(struct kmem_cache *cache,
>                                       unsigned int *size,
>                                       slab_flags_t *flags) {}
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 6bb87f2acd4e..0ecd293af344 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
>         return 0;
>  }
>
> -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
> +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
>  {
>         u8 tag;
>         unsigned long i;
> @@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
>         kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
>  }
>
> -void __kasan_free_pages(struct page *page, unsigned int order, bool init)
> +void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
>  {
>         if (likely(!PageHighMem(page)))
>                 kasan_poison(page_address(page), PAGE_SIZE << order,
> diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
> index 4004388b4e4b..45e552cb9172 100644
> --- a/mm/kasan/hw_tags.c
> +++ b/mm/kasan/hw_tags.c
> @@ -238,6 +238,20 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
>         return &alloc_meta->free_track[0];
>  }
>
> +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
> +{
> +       bool init = !want_init_on_free() && want_init_on_alloc(flags);

This check is now duplicated: one copy here and the same one in
post_alloc_hook() in page_alloc.c. Please either add a helper that
gets used in both places, or at least add a comment saying that the
two checks must be kept in sync.
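
Something like this, perhaps (helper name made up, and it could live
next to want_init_on_alloc() in include/linux/mm.h; just a sketch):

static inline bool should_init_pages_on_alloc(gfp_t flags)
{
        /*
         * When init_on_free is enabled, pages get zeroed on free, so
         * zeroing them again on allocation would be redundant.
         */
        return !want_init_on_free() && want_init_on_alloc(flags);
}

Then both kasan_alloc_pages() here and post_alloc_hook() in
page_alloc.c could call it instead of open-coding the check.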

> +
> +       kasan_unpoison_pages(page, order, init);
> +}
> +
> +void kasan_free_pages(struct page *page, unsigned int order)
> +{
> +       bool init = want_init_on_free();

Same here.
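
Since this check is a single call, a shared one-line comment in both
places might be enough here, e.g. (just a sketch, this is the
hw_tags.c side):

        /* Keep in sync with the init logic in free_pages_prepare(). */
        bool init = want_init_on_free();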

> +
> +       kasan_poison_pages(page, order, init);
> +}
> +
>  #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
>
>  void kasan_set_tagging_report_once(bool state)
> diff --git a/mm/mempool.c b/mm/mempool.c
> index a258cf4de575..0b8afbec3e35 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
>         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
>                 kasan_slab_free_mempool(element);
>         else if (pool->alloc == mempool_alloc_pages)
> -               kasan_free_pages(element, (unsigned long)pool->pool_data, false);
> +               kasan_poison_pages(element, (unsigned long)pool->pool_data,
> +                                  false);
>  }
>
>  static void kasan_unpoison_element(mempool_t *pool, void *element)
> @@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
>         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
>                 kasan_unpoison_range(element, __ksize(element));
>         else if (pool->alloc == mempool_alloc_pages)
> -               kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
> +               kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
> +                                    false);
>  }
>
>  static __always_inline void add_element(mempool_t *pool, void *element)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index aaa1655cf682..6e82a7f6fd6f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
>  static DEFINE_STATIC_KEY_TRUE(deferred_pages);
>
>  /*
> - * Calling kasan_free_pages() only after deferred memory initialization
> + * Calling kasan_poison_pages() only after deferred memory initialization
>   * has completed. Poisoning pages during deferred memory init will greatly
>   * lengthen the process and cause problem in large memory systems as the
>   * deferred pages initialization is done with interrupt disabled.
> @@ -394,15 +394,11 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
>   * on-demand allocation and then freed again before the deferred pages
>   * initialization is done, but this is not likely to happen.
>   */
> -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> -                                               bool init, fpi_t fpi_flags)
> +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
>  {
> -       if (static_branch_unlikely(&deferred_pages))
> -               return;
> -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> -               return;
> -       kasan_free_pages(page, order, init);
> +       return static_branch_unlikely(&deferred_pages) ||
> +              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> +               (fpi_flags & FPI_SKIP_KASAN_POISON));
>  }
>
>  /* Returns true if the struct page for the pfn is uninitialised */
> @@ -453,13 +449,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
>         return false;
>  }
>  #else
> -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> -                                               bool init, fpi_t fpi_flags)
> +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
>  {
> -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> -               return;
> -       kasan_free_pages(page, order, init);
> +       return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> +               (fpi_flags & FPI_SKIP_KASAN_POISON));
>  }
>
>  static inline bool early_page_uninitialised(unsigned long pfn)
> @@ -1245,7 +1238,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
>                         unsigned int order, bool check_free, fpi_t fpi_flags)
>  {
>         int bad = 0;
> -       bool init;
> +       bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
>
>         VM_BUG_ON_PAGE(PageTail(page), page);
>
> @@ -1314,10 +1307,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
>          * With hardware tag-based KASAN, memory tags must be set before the
>          * page becomes unavailable via debug_pagealloc or arch_free_page.
>          */
> -       init = want_init_on_free();
> -       if (init && !kasan_has_integrated_init())
> -               kernel_init_free_pages(page, 1 << order);
> -       kasan_free_nondeferred_pages(page, order, init, fpi_flags);
> +       if (kasan_has_integrated_init()) {

Is it guaranteed that this branch will be eliminated when
kasan_has_integrated_init() is a static inline function returning
false? I know this works with macros, but I don't remember seeing it
done with static inline functions. I assume it behaves the same way,
but I'm mentioning it just in case: if the branch were not
eliminated, the BUILD_BUG() in the !CONFIG_KASAN_HW_TAGS stubs of
kasan_alloc_pages()/kasan_free_pages() would trigger and break the
build.
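
To make the question concrete, the pattern boils down to something
like this (a minimal sketch, assuming a normal optimizing kernel
build):

static inline bool has_integrated_init(void)
{
        return false;
}

void free_path(void)
{
        if (has_integrated_init())
                BUILD_BUG();    /* must be folded away after inlining */
}

BUILD_BUG() only compiles if the call to the __compiletime_error
function it expands to gets optimized out, so if the compiler failed
to constant-fold the static inline here, the build would break. AFAIU
both GCC and Clang do fold this at the -O levels the kernel is built
with, so it should behave the same as the macro case, but a second
pair of eyes wouldn't hurt.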

> +               if (!skip_kasan_poison)
> +                       kasan_free_pages(page, order);
> +       } else {
> +               bool init = want_init_on_free();
> +
> +               if (init)
> +                       kernel_init_free_pages(page, 1 << order);
> +               if (!skip_kasan_poison)
> +                       kasan_poison_pages(page, order, init);
> +       }
>
>         /*
>          * arch_free_page() can make the page's contents inaccessible.  s390
> @@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
>  inline void post_alloc_hook(struct page *page, unsigned int order,
>                                 gfp_t gfp_flags)
>  {
> -       bool init;
> -
>         set_page_private(page, 0);
>         set_page_refcounted(page);
>
> @@ -2344,10 +2342,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
>          * kasan_alloc_pages and kernel_init_free_pages must be
>          * kept together to avoid discrepancies in behavior.
>          */
> -       init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
> -       kasan_alloc_pages(page, order, init);
> -       if (init && !kasan_has_integrated_init())
> -               kernel_init_free_pages(page, 1 << order);
> +       if (kasan_has_integrated_init()) {
> +               kasan_alloc_pages(page, order, gfp_flags);
> +       } else {
> +               bool init =
> +                       !want_init_on_free() && want_init_on_alloc(gfp_flags);
> +
> +               kasan_unpoison_pages(page, order, init);
> +               if (init)
> +                       kernel_init_free_pages(page, 1 << order);
> +       }
>
>         set_page_owner(page, order, gfp_flags);
>  }
> --
> 2.31.1.607.g51e8a6a459-goog
>


