[PATCH bpf-next v9 1/5] bpf: Move constants blinding out of arch-specific JITs

Hari Bathini hbathini at linux.ibm.com
Fri Mar 13 02:18:59 PDT 2026



On 12/03/26 10:32 pm, Xu Kuohai wrote:
> From: Xu Kuohai <xukuohai at huawei.com>
> 
> During the JIT stage, constants blinding rewrites instructions but only
> rewrites the private instruction copy of the JITed subprog, leaving the
> global instructions and insn_aux_data unchanged. This causes a mismatch
> between subprog instructions and the global state, making it difficult
> to look up the global insn_aux_data in the JIT.
> 
> To avoid this mismatch, and given that all arch-specific JITs already
> support constants blinding, move it to the generic verifier code, and
> switch to rewrite the global env->insnsi with the global states
> adjusted, as other rewrites in the verifier do.
> 
> This removes the constants blinding calls in each JIT, which are largely
> duplicated code across architectures.
> 
> Since constants blinding is only required for JIT, and there are two
> entry functions for JIT, jit_subprogs() and bpf_prog_select_runtime(),
> move the constants blinding invocation into the two functions.
> 
> If constants blinding fails, or if it succeeds but the subsequent JIT
> compilation fails, the kernel falls back to running the BPF program with
> the interpreter. To ensure a correct rollback, the program cloning before
> instruction rewriting in the constants blinding is preserved. During
> the blinding process, only the cloned instructions are patched, leaving
> the original program untouched.
> 
> Since bpf_patch_insn_data() is chosen for the constants blinding in the
> verifier path, and it adjusts the global auxiliary data in the verifier
> state, a key question is whether this auxiliary data should be restored
> when the JIT fails.
> 
> Besides instructions, bpf_patch_insn_data() adjusts env->insn_aux_data,
> env->subprog_info, prog->aux->poke_tab and env->insn_array_maps. env->
> insn_aux_data and env->subprog_info are no longer used after JIT failure
> and are freed at the end of bpf_check(). prog->aux->poke_tab is only
> used by the JIT. And when the JIT fails, programs using insn_array would
> be rejected by the bpf_insn_array_ready() function since no JITed
> addresses are available. This means env->insn_array_maps is only useful
> for the JIT. Therefore, none of the adjusted auxiliary data needs to be
> restored.
> 
> For classic BPF programs, constants blinding works as before since it
> is still invoked from bpf_prog_select_runtime().
> 
> Reviewed-by: Anton Protopopov <a.s.protopopov at gmail.com>
> Signed-off-by: Xu Kuohai <xukuohai at huawei.com>
> ---
>   arch/arc/net/bpf_jit_core.c      | 39 ++++++-----------
>   arch/arm/net/bpf_jit_32.c        | 41 +++---------------
>   arch/arm64/net/bpf_jit_comp.c    | 72 +++++++++----------------------
>   arch/loongarch/net/bpf_jit.c     | 59 ++++++++------------------
>   arch/mips/net/bpf_jit_comp.c     | 20 +--------
>   arch/parisc/net/bpf_jit_core.c   | 73 +++++++++++++-------------------
>   arch/powerpc/net/bpf_jit_comp.c  | 68 +++++++++++------------------
>   arch/riscv/net/bpf_jit_core.c    | 61 ++++++++++----------------
>   arch/s390/net/bpf_jit_comp.c     | 59 +++++++++-----------------
>   arch/sparc/net/bpf_jit_comp_64.c | 61 +++++++++-----------------
>   arch/x86/net/bpf_jit_comp.c      | 43 +++----------------
>   arch/x86/net/bpf_jit_comp32.c    | 33 ++-------------
>   include/linux/filter.h           | 11 ++++-
>   kernel/bpf/core.c                | 66 +++++++++++++++++++++++++----
>   kernel/bpf/verifier.c            | 40 +++++++++++------
>   15 files changed, 281 insertions(+), 465 deletions(-)
> 
[...]

> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index 52162e4a7f84..c9daa1a72378 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -142,9 +142,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   	int flen;
>   	struct bpf_binary_header *fhdr = NULL;
>   	struct bpf_binary_header *hdr = NULL;
> -	struct bpf_prog *org_fp = fp;
> -	struct bpf_prog *tmp_fp;
> -	bool bpf_blinded = false;
>   	bool extra_pass = false;
>   	u8 *fimage = NULL;
>   	u32 *fcode_base;
> @@ -152,24 +149,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   	u32 fixup_len;
>   
>   	if (!fp->jit_requested)
> -		return org_fp;
> -
> -	tmp_fp = bpf_jit_blind_constants(org_fp);
> -	if (IS_ERR(tmp_fp))
> -		return org_fp;
> -
> -	if (tmp_fp != org_fp) {
> -		bpf_blinded = true;
> -		fp = tmp_fp;
> -	}
> +		return fp;
>   
>   	jit_data = fp->aux->jit_data;
>   	if (!jit_data) {
>   		jit_data = kzalloc_obj(*jit_data);
> -		if (!jit_data) {
> -			fp = org_fp;
> -			goto out;
> -		}
> +		if (!jit_data)
> +			return fp;
>   		fp->aux->jit_data = jit_data;
>   	}
>   
> @@ -194,10 +180,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   	}
>   
>   	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
> -	if (addrs == NULL) {
> -		fp = org_fp;
> -		goto out_addrs;
> -	}
> +	if (addrs == NULL)
> +		goto out_err;
>   
>   	memset(&cgctx, 0, sizeof(struct codegen_context));
>   	bpf_jit_init_reg_mapping(&cgctx);
> @@ -211,11 +195,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   	cgctx.exception_cb = fp->aux->exception_cb;
>   
>   	/* Scouting faux-generate pass 0 */
> -	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
> +	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false))
>   		/* We hit something illegal or unsupported. */
> -		fp = org_fp;
> -		goto out_addrs;
> -	}
> +		goto out_err;
>   
>   	/*
>   	 * If we have seen a tail call, we need a second pass.
> @@ -226,10 +208,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   	 */
>   	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
>   		cgctx.idx = 0;
> -		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
> -			fp = org_fp;
> -			goto out_addrs;
> -		}
> +		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false))
> +			goto out_err;
>   	}
>   
>   	bpf_jit_realloc_regs(&cgctx);
> @@ -250,10 +230,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   
>   	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
>   					      bpf_jit_fill_ill_insns);
> -	if (!fhdr) {
> -		fp = org_fp;
> -		goto out_addrs;
> -	}
> +	if (!fhdr)
> +		goto out_err;
>   
>   	if (extable_len)
>   		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
> @@ -272,8 +250,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   				       extra_pass)) {
>   			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
>   			bpf_jit_binary_pack_free(fhdr, hdr);
> -			fp = org_fp;
> -			goto out_addrs;
> +			goto out_err;
>   		}
>   		bpf_jit_build_epilogue(code_base, &cgctx);
>   
> @@ -295,15 +272,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   	((u64 *)image)[1] = local_paca->kernel_toc;
>   #endif
>   
> +	if (!fp->is_func || extra_pass) {
> +		if (bpf_jit_binary_pack_finalize(fhdr, hdr))
> +			goto out_err;
> +	}
> +
>   	fp->bpf_func = (void *)fimage;
>   	fp->jited = 1;
>   	fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE;
>   
>   	if (!fp->is_func || extra_pass) {
> -		if (bpf_jit_binary_pack_finalize(fhdr, hdr)) {
> -			fp = org_fp;
> -			goto out_addrs;
> -		}
>   		bpf_prog_fill_jited_linfo(fp, addrs);
>   out_addrs:
>   		kfree(addrs);
> @@ -318,11 +296,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>   		jit_data->hdr = hdr;
>   	}
>   
> -out:
> -	if (bpf_blinded)
> -		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
> -
>   	return fp;
> +
> +out_err:
> +	if (extra_pass) {
> +		fp->bpf_func = NULL;
> +		fp->jited = 0;
> +		fp->jited_len = 0;
> +	}
> +	goto out_addrs;
>   }
>   
>   /*

Other than moving constants blinding out of arch code, this also
improves error handling in the powerpc JIT. Looks good to me.
For the powerpc part:

Reviewed-by: Hari Bathini <hbathini at linux.ibm.com>



More information about the linux-arm-kernel mailing list