[arm-platforms:kvm-arm64/nvhe-on-vhe 3/14] arch/arm64/kernel/kaslr.c:99:84: error: expected ')' before '{' token

kernel test robot lkp at intel.com
Sat Mar 26 16:27:21 PDT 2022


tree:   https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git kvm-arm64/nvhe-on-vhe
head:   b3e5370b9d39ade12b5aad9dbc547bee38fc56a9
commit: 3efdb5446f1dd4a08c73a77c48f710576255d653 [3/14] arm64: Turn kaslr_feature_override into a generic SW feature override
config: arm64-defconfig (https://download.01.org/0day-ci/archive/20220327/202203270701.pMh57LZP-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git/commit/?id=3efdb5446f1dd4a08c73a77c48f710576255d653
        git remote add arm-platforms https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git
        git fetch --no-tags arm-platforms kvm-arm64/nvhe-on-vhe
        git checkout 3efdb5446f1dd4a08c73a77c48f710576255d653
        # save the config file to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=arm64 SHELL=/bin/bash

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp at intel.com>

All errors (new ones prefixed by >>):

   arch/arm64/kernel/kaslr.c:62:12: warning: no previous prototype for 'kaslr_early_init' [-Wmissing-prototypes]
      62 | u64 __init kaslr_early_init(void)
         |            ^~~~~~~~~~~~~~~~
   arch/arm64/kernel/kaslr.c: In function 'kaslr_early_init':
>> arch/arm64/kernel/kaslr.c:99:84: error: expected ')' before '{' token
      99 |                                                  ARM64_SW_FEATURE_OVERRIDE_NOKASLR) {
         |                                                                                    ^~
         |                                                                                    )
   arch/arm64/kernel/kaslr.c:97:12: note: to match this '('
      97 |         if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val &
         |            ^
>> arch/arm64/kernel/kaslr.c:185:1: error: expected expression before '}' token
     185 | }
         | ^
   arch/arm64/kernel/kaslr.c:66:23: warning: unused variable 'raw' [-Wunused-variable]
      66 |         unsigned long raw;
         |                       ^~~
   arch/arm64/kernel/kaslr.c:65:33: warning: unused variable 'module_range' [-Wunused-variable]
      65 |         u64 seed, offset, mask, module_range;
         |                                 ^~~~~~~~~~~~
   arch/arm64/kernel/kaslr.c:65:27: warning: unused variable 'mask' [-Wunused-variable]
      65 |         u64 seed, offset, mask, module_range;
         |                           ^~~~
   arch/arm64/kernel/kaslr.c:65:19: warning: unused variable 'offset' [-Wunused-variable]
      65 |         u64 seed, offset, mask, module_range;
         |                   ^~~~~~
   arch/arm64/kernel/kaslr.c:65:13: warning: variable 'seed' set but not used [-Wunused-but-set-variable]
      65 |         u64 seed, offset, mask, module_range;
         |             ^~~~
   arch/arm64/kernel/kaslr.c:185:1: error: control reaches end of non-void function [-Werror=return-type]
     185 | }
         | ^
   cc1: some warnings being treated as errors


vim +99 arch/arm64/kernel/kaslr.c

    53	
    54	/*
    55	 * This routine will be executed with the kernel mapped at its default virtual
    56	 * address, and if it returns successfully, the kernel will be remapped, and
    57	 * start_kernel() will be executed from a randomized virtual offset. The
    58	 * relocation will result in all absolute references (e.g., static variables
    59	 * containing function pointers) to be reinitialized, and zero-initialized
    60	 * .bss variables will be reset to 0.
    61	 */
    62	u64 __init kaslr_early_init(void)
    63	{
    64		void *fdt;
    65		u64 seed, offset, mask, module_range;
    66		unsigned long raw;
    67	
    68		/*
    69		 * Set a reasonable default for module_alloc_base in case
    70		 * we end up running with module randomization disabled.
    71		 */
    72		module_alloc_base = (u64)_etext - MODULES_VSIZE;
    73		dcache_clean_inval_poc((unsigned long)&module_alloc_base,
    74				    (unsigned long)&module_alloc_base +
    75					    sizeof(module_alloc_base));
    76	
    77		/*
    78		 * Try to map the FDT early. If this fails, we simply bail,
    79		 * and proceed with KASLR disabled. We will make another
    80		 * attempt at mapping the FDT in setup_machine()
    81		 */
    82		fdt = get_early_fdt_ptr();
    83		if (!fdt) {
    84			kaslr_status = KASLR_DISABLED_FDT_REMAP;
    85			return 0;
    86		}
    87	
    88		/*
    89		 * Retrieve (and wipe) the seed from the FDT
    90		 */
    91		seed = get_kaslr_seed(fdt);
    92	
    93		/*
    94		 * Check if 'nokaslr' appears on the command line, and
    95		 * return 0 if that is the case.
    96		 */
    97		if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val &
    98							 arm64_sw_feature_override.mask,
  > 99							 ARM64_SW_FEATURE_OVERRIDE_NOKASLR) {
   100			kaslr_status = KASLR_DISABLED_CMDLINE;
   101			return 0;
   102		}
   103	
   104		/*
   105		 * Mix in any entropy obtainable architecturally if enabled
   106		 * and supported.
   107		 */
   108	
   109		if (arch_get_random_seed_long_early(&raw))
   110			seed ^= raw;
   111	
   112		if (!seed) {
   113			kaslr_status = KASLR_DISABLED_NO_SEED;
   114			return 0;
   115		}
   116	
   117		/*
   118		 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
   119		 * kernel image offset from the seed. Let's place the kernel in the
   120		 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
   121		 * the lower and upper quarters to avoid colliding with other
   122		 * allocations.
   123		 * Even if we could randomize at page granularity for 16k and 64k pages,
   124		 * let's always round to 2 MB so we don't interfere with the ability to
   125		 * map using contiguous PTEs
   126		 */
   127		mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
   128		offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
   129	
   130		/* use the top 16 bits to randomize the linear region */
   131		memstart_offset_seed = seed >> 48;
   132	
   133		if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
   134		    (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
   135		     IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
   136			/*
   137			 * KASAN without KASAN_VMALLOC does not expect the module region
   138			 * to intersect the vmalloc region, since shadow memory is
   139			 * allocated for each module at load time, whereas the vmalloc
   140			 * region is shadowed by KASAN zero pages. So keep modules
   141			 * out of the vmalloc region if KASAN is enabled without
   142			 * KASAN_VMALLOC, and put the kernel well within 4 GB of the
   143			 * module region.
   144			 */
   145			return offset % SZ_2G;
   146	
   147		if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
   148			/*
   149			 * Randomize the module region over a 2 GB window covering the
   150			 * kernel. This reduces the risk of modules leaking information
   151			 * about the address of the kernel itself, but results in
   152			 * branches between modules and the core kernel that are
   153			 * resolved via PLTs. (Branches between modules will be
   154			 * resolved normally.)
   155			 */
   156			module_range = SZ_2G - (u64)(_end - _stext);
   157			module_alloc_base = max((u64)_end + offset - SZ_2G,
   158						(u64)MODULES_VADDR);
   159		} else {
   160			/*
   161			 * Randomize the module region by setting module_alloc_base to
   162			 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
   163			 * _stext) . This guarantees that the resulting region still
   164			 * covers [_stext, _etext], and that all relative branches can
   165			 * be resolved without veneers unless this region is exhausted
   166			 * and we fall back to a larger 2GB window in module_alloc()
   167			 * when ARM64_MODULE_PLTS is enabled.
   168			 */
   169			module_range = MODULES_VSIZE - (u64)(_etext - _stext);
   170			module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
   171		}
   172	
   173		/* use the lower 21 bits to randomize the base of the module region */
   174		module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
   175		module_alloc_base &= PAGE_MASK;
   176	
   177		dcache_clean_inval_poc((unsigned long)&module_alloc_base,
   178				    (unsigned long)&module_alloc_base +
   179					    sizeof(module_alloc_base));
   180		dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
   181				    (unsigned long)&memstart_offset_seed +
   182					    sizeof(memstart_offset_seed));
   183	
   184		return offset;
 > 185	}
   186	

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp



More information about the linux-arm-kernel mailing list