[PATCH 12/13] x86/jitalloc: prepare to allocate executable memory as ROX
Edgecombe, Rick P
rick.p.edgecombe at intel.com
Thu Jun 1 09:54:27 PDT 2023
On Thu, 2023-06-01 at 13:12 +0300, Mike Rapoport wrote:
> /*
> * Are we looking at a near JMP with a 1 or 4-byte displacement.
> @@ -331,7 +344,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
>
> DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
>
> - text_poke_early(instr, insn_buff, insn_buff_sz);
> + do_text_poke(instr, insn_buff, insn_buff_sz);
>
> next:
> optimize_nops(instr, a->instrlen);
> @@ -564,7 +577,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
> optimize_nops(bytes, len);
> DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
> DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
> - text_poke_early(addr, bytes, len);
> + do_text_poke(addr, bytes, len);
> }
> }
> }
> @@ -638,7 +651,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
> if (len == insn.length) {
> DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
> DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
> - text_poke_early(addr, bytes, len);
> + do_text_poke(addr, bytes, len);
> }
> }
> }
> @@ -674,7 +687,7 @@ static void poison_endbr(void *addr, bool warn)
> */
> DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr);
> DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr);
> - text_poke_early(addr, &poison, 4);
> + do_text_poke(addr, &poison, 4);
> }
>
> /*
> @@ -869,7 +882,7 @@ static int cfi_disable_callers(s32 *start, s32 *end)
> if (!hash) /* nocfi callers */
> continue;
>
> - text_poke_early(addr, jmp, 2);
> + do_text_poke(addr, jmp, 2);
> }
>
> return 0;
> @@ -892,7 +905,7 @@ static int cfi_enable_callers(s32 *start, s32 *end)
> if (!hash) /* nocfi callers */
> continue;
>
> - text_poke_early(addr, mov, 2);
> + do_text_poke(addr, mov, 2);
> }
>
> return 0;
> @@ -913,7 +926,7 @@ static int cfi_rand_preamble(s32 *start, s32 *end)
> return -EINVAL;
>
> hash = cfi_rehash(hash);
> - text_poke_early(addr + 1, &hash, 4);
> + do_text_poke(addr + 1, &hash, 4);
> }
>
> return 0;
> @@ -932,9 +945,9 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
> addr, addr, 5, addr))
> return -EINVAL;
>
> - text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
> + do_text_poke(addr, fineibt_preamble_start, fineibt_preamble_size);
> WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
> - text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
> + do_text_poke(addr + fineibt_preamble_hash, &hash, 4);
> }
It is just a local flush, but I wonder how much text_poke()ing is too
much. A lot of them are even inside loops. Can't it do the batch
version at least?
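
For illustration only (a rough sketch, assuming the existing
text_poke_queue()/text_poke_finish() batching helpers could be used at
this point, which I haven't checked), the per-entry pokes in those
loops could be queued up and flushed once:

/*
 * Sketch, not against the real structures: queue every patch site in
 * a table and do a single flush at the end, instead of one poke (and
 * flush) per entry.
 */
static void poke_sites_batched(s32 *start, s32 *end,
			       const void *repl, size_t len)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;

		/* queue the write; no flush yet */
		text_poke_queue(addr, repl, len, NULL);
	}

	/* one batch flush for everything queued above */
	text_poke_finish();
}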
The other thing, and maybe this is in the paranoia category, but it's
probably at least worth noting: before, the modules were not made
executable until all of the code was finalized. Now they are made
executable in an intermediate state and then patched later. It might
weaken the CFI stuff, but it also just seems a bit unbounded for
dealing with executable code.
Preparing the modules in a separate RW mapping, and then text_poke()ing
the whole thing in when you are done would resolve both of these.
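
Very roughly, something like this (sketch only, with made-up names:
finalize_module_text() and prepare_text() are placeholders for
whatever applies the relocations/alternatives/CFI rewrites, and
text_poke_copy() is assumed to be usable for the final copy):

/*
 * Sketch: do all the patching on a writable scratch copy, then write
 * the finished text into the ROX destination in one shot.
 */
static int finalize_module_text(void *rox_dst, const void *image,
				size_t size)
{
	void *scratch = vzalloc(size);

	if (!scratch)
		return -ENOMEM;

	memcpy(scratch, image, size);

	/* apply alternatives/retpolines/CFI rewrites on the RW copy */
	prepare_text(scratch, size);		/* placeholder name */

	/* one copy of the finished text into the ROX mapping */
	text_poke_copy(rox_dst, scratch, size);

	vfree(scratch);
	return 0;
}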