[PATCH v5sub1 7/8] arm64: move kernel image to base of vmalloc area

Mark Rutland mark.rutland at arm.com
Tue Feb 16 08:42:45 PST 2016


On Tue, Feb 16, 2016 at 06:36:36PM +0300, Andrey Ryabinin wrote:
> 
> On 02/16/2016 06:17 PM, Ard Biesheuvel wrote:
> > On 16 February 2016 at 13:59, Andrey Ryabinin <aryabinin at virtuozzo.com> wrote:
> >> +static void verify_shadow(void)
> >> +{
> >> +       struct memblock_region *reg;
> >> +       int i = 0;
> >> +
> >> +       for_each_memblock(memory, reg) {
> >> +               void *start = (void *)__phys_to_virt(reg->base);
> >> +               void *end = (void *)__phys_to_virt(reg->base + reg->size);
> >> +               int *shadow_start, *shadow_end;
> >> +
> >> +               if (start >= end)
> >> +                       break;
> >> +               shadow_start = (int *)((unsigned long)kasan_mem_to_shadow(start) & ~(PAGE_SIZE - 1));
> >> +               shadow_end =  (int *)kasan_mem_to_shadow(end);
> > 
> > shadow_start and shadow_end can refer to the same page as in the
> > previous iteration. For instance, I have these two regions
> > 
> >   0x00006e090000-0x00006e0adfff [Conventional Memory|   |  |  |  |  |   |WB|WT|WC|UC]
> >   0x00006e0ae000-0x00006e0affff [Loader Data        |   |  |  |  |  |   |WB|WT|WC|UC]
> > 
> > which are covered by different memblocks, since the second one is
> > marked MEMBLOCK_NOMAP because it contains the UEFI memory map.
> > 
> > I get the following output
> > 
> > kasan: screwed shadow mapping 23575, 23573
> > 
> > which I think is simply a result of the fact that shadow_start
> > refers to the same page as in the previous iteration(s).
> > 
> 
> You are right.
> So we should write the value of 'shadow_start' itself instead of 'i'.

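For anyone following along at home: generic KASAN maps each 8 bytes of
kernel address space to one shadow byte, so one 4K shadow page covers
32K of memory, and two regions that sit close together can have shadow
bytes in the same shadow page. The original loop rounded shadow_start
down to a page boundary and wrote a per-region counter there, so the
write for the second region clobbered the marker belonging to the
first. A standalone sketch (not kernel code; it assumes the generic
mapping shadow = (addr >> 3) + offset, and drops the constant offset
since it cancels out of the comparison) shows Ard's two regions landing
in the same shadow page:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3
#define PAGE_SHIFT			12

/* Index of the 4K shadow page covering the shadow byte for 'addr'. */
static unsigned long shadow_page_index(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) >> PAGE_SHIFT;
}

int main(void)
{
	/* The two adjacent regions from the EFI memory map above. */
	unsigned long region_a_last  = 0x00006e0adfffUL;
	unsigned long region_b_first = 0x00006e0ae000UL;

	/* Both print 0xdc15: the two regions share a shadow page. */
	printf("A's last shadow page:  %#lx\n",
	       shadow_page_index(region_a_last));
	printf("B's first shadow page: %#lx\n",
	       shadow_page_index(region_b_first));
	return 0;
}

Writing the shadow address itself, as the patch below does, sidesteps
this: each region only ever writes within its own shadow range (no
rounding down), and the expected value at each location is derived from
the location itself rather than from the order the regions were walked.
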
FWIW, with the patch below I don't see any "screwed shadow mapping"
warnings on my board, yet I still see a tonne of KASAN splats in the
scheduler later on.

Mark.

> ---
>  arch/arm64/mm/kasan_init.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 51 insertions(+)
> 
> diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> index cf038c7..ee035c2 100644
> --- a/arch/arm64/mm/kasan_init.c
> +++ b/arch/arm64/mm/kasan_init.c
> @@ -117,6 +117,55 @@ static void __init cpu_set_ttbr1(unsigned long ttbr1)
>  	: "r" (ttbr1));
>  }
>  
> +static void verify_shadow(void)
> +{
> +	struct memblock_region *reg;
> +
> +	for_each_memblock(memory, reg) {
> +		void *start = (void *)__phys_to_virt(reg->base);
> +		void *end = (void *)__phys_to_virt(reg->base + reg->size);
> +		unsigned long *shadow_start, *shadow_end;
> +
> +		if (start >= end)
> +			break;
> +		shadow_start = (unsigned long *)kasan_mem_to_shadow(start);
> +		shadow_end = (unsigned long *)kasan_mem_to_shadow(end);
> +		for (; shadow_start < shadow_end; shadow_start += PAGE_SIZE / sizeof(unsigned long)) {
> +			*shadow_start = (unsigned long)shadow_start;
> +		}
> +	}
> +
> +	for_each_memblock(memory, reg) {
> +		void *start = (void *)__phys_to_virt(reg->base);
> +		void *end = (void *)__phys_to_virt(reg->base + reg->size);
> +		unsigned long *shadow_start, *shadow_end;
> +
> +		if (start >= end)
> +			break;
> +		shadow_start = (unsigned long *)kasan_mem_to_shadow(start);
> +		shadow_end = (unsigned long *)kasan_mem_to_shadow(end);
> +		for (; shadow_start < shadow_end; shadow_start += PAGE_SIZE / sizeof(unsigned long)) {
> +			if (*shadow_start != (unsigned long)shadow_start) {
> +				pr_err("screwed shadow mapping %lx, %lx\n", *shadow_start, (unsigned long)shadow_start);
> +				goto clear;
> +			}
> +		}
> +	}
> +clear:
> +	for_each_memblock(memory, reg) {
> +		void *start = (void *)__phys_to_virt(reg->base);
> +		void *end = (void *)__phys_to_virt(reg->base + reg->size);
> +		unsigned long shadow_start, shadow_end;
> +
> +		if (start >= end)
> +			break;
> +		shadow_start = (unsigned long)kasan_mem_to_shadow(start);
> +		shadow_end = (unsigned long)kasan_mem_to_shadow(end);
> +		memset((void *)shadow_start, 0, shadow_end - shadow_start);
> +	}
> +
> +}
> +
>  void __init kasan_init(void)
>  {
>  	struct memblock_region *reg;
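
One nit on the helper itself: since it is only ever called from
kasan_init(), it could presumably be marked __init so the text is
discarded after boot, and if something like this were kept long term it
would probably want to hide behind a debug option. A rough sketch of
what I mean (CONFIG_KASAN_SHADOW_CHECK is an invented name, not an
existing Kconfig symbol):

#ifdef CONFIG_KASAN_SHADOW_CHECK	/* hypothetical Kconfig symbol */
static void __init verify_shadow(void)
{
	/* ... the three passes from the patch: write, check, clear ... */
}
#else
static inline void verify_shadow(void) { }
#endif
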
> @@ -159,6 +208,8 @@ void __init kasan_init(void)
>  	cpu_set_ttbr1(__pa(swapper_pg_dir));
>  	flush_tlb_all();
>  
> +	verify_shadow();
> +
>  	/* At this point kasan is fully initialized. Enable error messages */
>  	init_task.kasan_depth = 0;
>  	pr_info("KernelAddressSanitizer initialized\n");
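
One last observation on the call site: the placement looks right, since
verify_shadow() runs after the switch to swapper_pg_dir and the TLB
flush (so it exercises the final shadow mapping), but before
init_task.kasan_depth is cleared, so the temporary scribbling over the
shadow (and the memset back to zero) happens while error reporting is
still disabled.
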
> -- 