[PATCH v5] ARM: net: JIT compiler for packet filters
Rabin Vincent
rabin at rab.in
Tue Dec 27 12:29:45 EST 2011
On Fri, Dec 23, 2011 at 06:23, Mircea Gherzan <mgherzan at gmail.com> wrote:
> Based on Matt Evans's PPC64 implementation.
>
> The compiler generates ARM instructions but interworking is
> supported for Thumb2 kernels.
>
> Supports both little and big endian. Unaligned loads are emitted
> for ARMv6+. Not all the BPF opcodes that deal with ancillary data
> are supported. The scratch memory of the filter lives on the stack.
> Hardware integer division is used if it is available.
>
> Enabled in the same way as for x86-64 and PPC64:
>
> echo 1 > /proc/sys/net/core/bpf_jit_enable
>
> A value greater than 1 enables opcode output.
>
> Signed-off-by: Mircea Gherzan <mgherzan at gmail.com>
> ---
I've had a somewhat incomplete version of this (supporting ARMv6+ only,
but Thumb-2 too) sitting around for a while since I wasn't happy with
the amount of testing I gave it.
Anyway, I was able to use the tests I did already have to find some bugs
in this patch.
The failures are from test code which generates packets to cover all
possible paths for some test filters and then compares the result (only
the final return value) against the interpreter. I only ran it for
ARMv7 with no frame pointers.
> +/*
> + * Emit an instruction that will be executed unconditionally.
> + */
> +static inline void emit(u32 inst, struct jit_ctx *ctx)
> +{
> + _emit(ARM_COND_AL, inst, ctx);
> +}
> +
> +static u16 saved_regs(struct jit_ctx *ctx)
> +{
> + u16 ret = 0;
> +
> + if (ctx->skf->len > 1)
> + ret |= 1 << r_A;
> +
TEST 6 ==========================================
BPF_STMT(BPF_RET+BPF_A, 0),
Code: e3a04000 e1a00004 e12fff1e (0000000000)
(0) retinterp 0 retjit 0
+OK
qemu: uncaught target signal 11 (Segmentation fault) - core dumped
All code
========
0: e3a04000 mov r4, #0 <- clobber
4: e1a00004 mov r0, r4
8: e12fff1e bx lr
> +#ifdef CONFIG_FRAME_POINTER
> + ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
> +#else
> + if (ctx->seen & SEEN_CALL)
> + ret |= 1 << ARM_LR;
> +#endif
> + if (ctx->seen & (SEEN_DATA | SEEN_SKB))
> + ret |= 1 << r_skb;
> + if (ctx->seen & SEEN_DATA)
> + ret |= (1 << r_skb_data) | (1 << r_skb_hl);
> + if (ctx->seen & SEEN_X)
> + ret |= 1 << r_X;
> +
> + return ret;
> +}
> +
> +static inline int mem_words_used(struct jit_ctx *ctx)
> +{
> + u32 words = ctx->seen & SEEN_MEM;
> + /* yes, we do waste some stack space IF there are "holes" in the set */
> + return 32 - __builtin_clz(words);
You could use fls(words) here.
> +}
> +
> +static inline bool is_load_to_a(u16 inst)
> +{
> + switch (inst) {
> + case BPF_S_LD_W_LEN:
> + case BPF_S_LD_W_ABS:
> + case BPF_S_LD_H_ABS:
> + case BPF_S_LD_B_ABS:
> + case BPF_S_ANC_CPU:
> + case BPF_S_ANC_IFINDEX:
> + case BPF_S_ANC_MARK:
> + case BPF_S_ANC_PROTOCOL:
> + case BPF_S_ANC_RXHASH:
> + case BPF_S_ANC_QUEUE:
> + return true;
> + default:
> + return false;
> + }
> +}
> +
> +static void build_prologue(struct jit_ctx *ctx)
> +{
> + u16 reg_set = saved_regs(ctx);
> + u16 first_inst = ctx->skf->insns[0].code;
> + u16 off;
> +
> +#ifdef CONFIG_FRAME_POINTER
> + emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
> + emit(ARM_PUSH(reg_set), ctx);
> + emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
> +#else
> + if (reg_set)
> + emit(ARM_PUSH(reg_set), ctx);
> +#endif
> +
> + if (ctx->seen & (SEEN_DATA | SEEN_SKB))
> + emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
> +
> + if (ctx->seen & SEEN_DATA) {
> + off = offsetof(struct sk_buff, data);
> + emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
> + /* headlen = len - data_len */
> + off = offsetof(struct sk_buff, len);
> + emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
> + off = offsetof(struct sk_buff, data_len);
> + emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
> + emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
> + }
> +
> + if (ctx->seen & SEEN_X)
> + emit(ARM_MOV_I(r_X, 0), ctx);
> +
> + /* do not leak kernel data to userspace */
> + if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
> + emit(ARM_MOV_I(r_A, 0), ctx);
> +
> + /* stack space for the BPF_MEM words */
> + if (ctx->seen & SEEN_MEM)
> + emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
> +}
> +
> +static void build_epilogue(struct jit_ctx *ctx)
> +{
> + u16 reg_set = saved_regs(ctx);
> +
> + if (ctx->seen & SEEN_MEM)
> + emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
> +
> + reg_set &= ~(1 << ARM_LR);
> +
> +#ifdef CONFIG_FRAME_POINTER
> + /* the first instruction of the prologue was: mov ip, sp */
> + reg_set &= ~(1 << ARM_IP);
> + reg_set |= (1 << ARM_SP);
> + emit(ARM_LDM(ARM_SP, reg_set), ctx);
> +#else
> + if (ctx->seen) {
> + if (ctx->seen & SEEN_CALL)
> + reg_set |= 1 << ARM_PC;
> + emit(ARM_POP(reg_set), ctx);
> + }
TEST 9 ==========================================
BPF_STMT(BPF_ALU+BPF_ADD+BPF_K, 1000),
BPF_STMT(BPF_RET+BPF_A, 0)
Code: e92d0010 e3a04000 e2844ffa e1a00004 e12fff1e (0000000000)
(0) retinterp 0x3e8 retjit 0x3e8
+OK
qemu: uncaught target signal 11 (Segmentation fault) - core dumped
All code
========
0: e92d0010 push {r4}
4: e3a04000 mov r4, #0
8: e2844ffa add r4, r4, #1000 ; 0x3e8
c: e1a00004 mov r0, r4
10: e12fff1e bx lr <-- no pop
I added a || ctx->skf->len > 1 condition (still not correct because
of the earlier failure) to the if above since I had several tests that
would hit this bug.
> +
> + if (!(ctx->seen & SEEN_CALL))
> + emit(ARM_BX(ARM_LR), ctx);
> +#endif
> +}
> +
...
> + case BPF_S_LD_B_ABS:
> + load_order = 0;
> +load:
> + emit_mov_i(r_off, k, ctx);
> +load_common:
> + ctx->seen |= SEEN_DATA | SEEN_CALL;
> +
> + if (load_order > 0) {
> + emit(ARM_SUB_I(r_scratch, r_skb_hl,
> + 1 << load_order), ctx);
> + emit(ARM_CMP_R(r_scratch, r_off), ctx);
> + condt = ARM_COND_HS;
> + } else {
> + emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
> + condt = ARM_COND_HI;
> + }
> +
> + _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
> + ctx);
> +
> + if (load_order == 0)
> + _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
> + ctx);
> + else if (load_order == 1)
> + emit_load_be16(condt, r_A, r_scratch, ctx);
> + else if (load_order == 2)
> + emit_load_be32(condt, r_A, r_scratch, ctx);
> +
> + _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
> +
> + /* the slowpath */
> + emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
> + emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
> + /* the offset is already in R1 */
> + emit_blx_r(ARM_R3, ctx);
> + emit(ARM_MOV_R(r_A, ARM_R0), ctx);
You're missing the "if (invalid data reference) return 0" behaviour.
> + break;
> + case BPF_S_LD_W_IND:
> + load_order = 2;
> + goto load_ind;
> + case BPF_S_LD_H_IND:
> + load_order = 1;
> + goto load_ind;
> + case BPF_S_LD_B_IND:
> + load_order = 0;
> +load_ind:
> + OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
> + goto load_common;
> + case BPF_S_LDX_IMM:
> + ctx->seen |= SEEN_X;
> + emit_mov_i(r_X, k, ctx);
> + break;
> + case BPF_S_LDX_W_LEN:
> + ctx->seen |= SEEN_X | SEEN_SKB;
> + emit(ARM_LDR_I(r_X, r_skb,
> + offsetof(struct sk_buff, len)), ctx);
> + break;
> + case BPF_S_LDX_MEM:
> + ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
> + emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
> + break;
> + case BPF_S_LDX_B_MSH:
> + /* x = ((*(frame + k)) & 0xf) << 2; */
> + ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
> + /* offset in r1: we might have to take the slow path */
> + emit_mov_i(r_off, k, ctx);
> + emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
You don't appear to be performing the "punt to interpreter" handling for
the negative K values.
> +
> + /* load in r0: common with the slowpath */
> + _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
> + ARM_R1), ctx);
> + /*
> + * emit_mov_i() might generate one or two instructions,
> + * the same holds for emit_blx_r()
> + */
> + _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
> +
> + emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
> + /* r_off is r1 */
> + emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
> + emit_blx_r(ARM_R3, ctx);
"if (invalid address) return 0" behaviour needed here too.
> +
> + emit(ARM_ORR_I(r_X, ARM_R0, 0x00f), ctx);
This should be AND, not ORR.
TEST 67 ==========================================
(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)
(0) retinterp 0 retjit 0
+OK
(1) retinterp 0 retjit 0
+OK
(2) retinterp 0 retjit 0
+OK
(3) retinterp 0 retjit 0
+OK
(4) retinterp 0 retjit 0
+OK
(5) retinterp 0 retjit 0
+OK
(6) retinterp 0 retjit 0
+OK
(7) retinterp 0 retjit 0
+OK
(8) retinterp 0 retjit 0
+OK
(9) retinterp 0xffff retjit 0
-ERROR different rets
> + emit(ARM_LSL_I(r_X, r_X, 2), ctx);
> + break;
> + case BPF_S_ST:
> + ctx->seen |= SEEN_MEM_WORD(k);
> + emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
> + break;
> + case BPF_S_STX:
> + ctx->seen |= SEEN_MEM_WORD(k) | SEEN_X;
> + emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
> + break;
> + case BPF_S_ALU_ADD_K:
> + /* A += K */
> + OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
> + break;
> + case BPF_S_ALU_ADD_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
> + break;
> + case BPF_S_ALU_SUB_K:
> + /* A -= K */
> + OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
> + break;
> + case BPF_S_ALU_SUB_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
> + break;
> + case BPF_S_ALU_MUL_K:
> + /* A *= K */
> + emit_mov_i(r_scratch, k, ctx);
> + emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
> + break;
> + case BPF_S_ALU_MUL_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_MUL(r_A, r_A, r_X), ctx);
> + break;
> + case BPF_S_ALU_DIV_K:
> + /* current k == reciprocal_value(userspace k) */
> + emit_mov_i(r_scratch, k, ctx);
> + /* A = top 32 bits of the product */
> + emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
> + break;
> + case BPF_S_ALU_DIV_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_CMP_I(r_X, 0), ctx);
> + emit_err_ret(ARM_COND_EQ, ctx);
> + emit_udiv(r_A, r_A, r_X, ctx);
> + break;
> + case BPF_S_ALU_OR_K:
> + /* A |= K */
> + OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
> + break;
> + case BPF_S_ALU_OR_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_ORR_I(r_A, r_A, r_X), ctx);
TEST 19 ==========================================
BPF_STMT(BPF_ALU+BPF_OR+BPF_X, 0),
BPF_STMT(BPF_RET+BPF_A, 0)
Code: e92d0030 e3a05000 e3a04000 e3844005 e1a00004 e8bd0030 e12fff1e
(0000000000)
(0) retinterp 0 retjit 0x5
-ERROR different rets
All code
========
0: e92d0030 push {r4, r5}
4: e3a05000 mov r5, #0
8: e3a04000 mov r4, #0
c: e3844005 orr r4, r4, #5 <-- should be register
10: e1a00004 mov r0, r4
14: e8bd0030 pop {r4, r5}
18: e12fff1e bx lr
> + break;
> + case BPF_S_ALU_AND_K:
> + /* A &= K */
> + OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
> + break;
> + case BPF_S_ALU_AND_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_AND_R(r_A, r_A, r_X), ctx);
> + break;
> + case BPF_S_ALU_LSH_K:
> + if (unlikely(k > 31))
> + return -1;
> + emit(ARM_LSL_I(r_A, r_A, k), ctx);
> + break;
TEST 23 ==========================================
BPF_STMT(BPF_ALU+BPF_LSH+BPF_K, 5),
BPF_STMT(BPF_RET+BPF_A, 0)
Code: e92d0010 e3a04000 e1a04294 e1a00004 e8bd0010 e12fff1e (0000000000)
(0) retinterp 0 retjit 0x1
-ERROR different rets
All code
========
0: e92d0010 push {r4}
4: e3a04000 mov r4, #0
8: e1a04294 lsl r4, r4, r2 <-- should be immediate
c: e1a00004 mov r0, r4
10: e8bd0010 pop {r4}
14: e12fff1e bx lr
> + case BPF_S_ALU_LSH_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
> + break;
> + case BPF_S_ALU_RSH_K:
> + if (unlikely(k > 31))
> + return -1;
> + emit(ARM_LSR_I(r_A, r_A, k), ctx);
> + break;
TEST 91 ==========================================
(ip[24] >> 1) + ip[25] == 1
(0) retinterp 0 retjit 0
+OK
(1) retinterp 0 retjit 0
+OK
(2) retinterp 0xffff retjit 0
-ERROR different rets
All code
========
...
78: e1a040b4 strh r4, [r0, r4]! <-- rsh #1
...
(register/imm mixup)
> + case BPF_S_ALU_RSH_X:
> + ctx->seen |= SEEN_X;
> + emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
> + break;
You're missing NEG, which should be straightforward to implement.
> + case BPF_S_JMP_JA:
> + /* pc += K */
> + emit(ARM_B(b_imm(i + k, ctx)), ctx);
> + break;
TEST 39 ==========================================
BPF_STMT(BPF_JMP+BPF_JA, 0),
BPF_STMT(BPF_LD+BPF_IMM, 0x123)
BPF_STMT(BPF_RET+BPF_A, 0)
Code: e92d0010 e3a04000 fffffffe e3004123 e1a00004 e8bd0010 e12fff1e
(0000000000)
qemu: uncaught target signal 4 (Illegal instruction) - core dumped
Illegal instruction
All code
========
0: e92d0010 push {r4}
4: e3a04000 mov r4, #0
8: fffffffe undefined instruction 0xfffffffe
c: e3004123 movw r4, #291 ; 0x123
10: e1a00004 mov r0, r4
14: e8bd0010 pop {r4}
18: e12fff1e bx lr
(1) ARM_B doesn't mask off the relevant bits
(2) b_imm should be passed i + k + 1, not i + k
> + case BPF_S_JMP_JEQ_K:
> + /* pc += (A == K) ? pc->jt : pc->jf */
> + condt = ARM_COND_EQ;
> + condf = ARM_COND_NE;
Since condf == condt ^ 1, you can just do that in cond_jump and remove
the explicit assignments to it.
> + goto cmp_imm;
> + case BPF_S_JMP_JGT_K:
> + /* pc += (A > K) ? pc->jt : pc->jf */
> + condt = ARM_COND_HI;
> + condf = ARM_COND_LS;
> + goto cmp_imm;
> + case BPF_S_JMP_JGE_K:
> + /* pc += (A >= K) ? pc->jt : pc->jf */
> + condt = ARM_COND_HI;
> + condf = ARM_COND_LS;
> + /* (x >= y) IFF (x > y - 1) */
> + if (unlikely(k == 0))
> + return -1;
> + k--;
Why don't you just use ARM_COND_HS here?
> +cmp_imm:
> + imm12 = imm8m(k);
> + if (imm12 < 0) {
> + emit_mov_i_no8m(r_scratch, k, ctx);
> + emit(ARM_CMP_R(r_A, r_scratch), ctx);
> + } else {
> + emit(ARM_CMP_I(r_A, imm12), ctx);
> + }
> +cond_jump:
> + if (inst->jt)
> + _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
> + ctx)), ctx);
> + if (inst->jf)
> + _emit(condf, ARM_B(b_imm(i + inst->jf + 1,
> + ctx)), ctx);
> + break;
> + case BPF_S_JMP_JEQ_X:
> + /* pc += (A == X) ? pc->jt : pc->jf */
> + condt = ARM_COND_EQ;
> + condf = ARM_COND_NE;
> + goto cmp_x;
> + case BPF_S_JMP_JGT_X:
> + /* pc += (A > X) ? pc->jt : pc->jf */
> + condt = ARM_COND_HI;
> + condf = ARM_COND_LS;
> + goto cmp_x;
> + case BPF_S_JMP_JGE_X:
> + /* pc += (A >= X) ? pc->jt : pc->jf */
> + condt = ARM_COND_CS;
> + condf = ARM_COND_CC;
> +cmp_x:
> + ctx->seen |= SEEN_X;
> + emit(ARM_CMP_R(r_A, r_X), ctx);
> + goto cond_jump;
> + case BPF_S_JMP_JSET_K:
> + /* pc += (A & K) ? pc->jt : pc->jf */
> + condt = ARM_COND_NE;
> + /* not set iff all zeroes iff Z==1 iff EQ */
> + condf = ARM_COND_EQ;
> +
> + imm12 = imm8m(k);
> + if (imm12 < 0) {
> + emit_mov_i_no8m(r_scratch, k, ctx);
> + emit(ARM_TST_R(r_A, r_scratch), ctx);
> + } else {
> + emit(ARM_TST_I(r_A, imm12), ctx);
> + }
> + goto cond_jump;
> + case BPF_S_JMP_JSET_X:
> + /* pc += (A & X) ? pc->jt : pc->jf */
> + condt = ARM_COND_NE;
> + condf = ARM_COND_EQ;
> + emit(ARM_TST_R(r_A, r_X), ctx);
> + goto cond_jump;
> + case BPF_S_RET_A:
> + emit(ARM_MOV_R(ARM_R0, r_A), ctx);
> + goto b_epilogue;
> + case BPF_S_RET_K:
> + if ((k == 0) && (ctx->ret0_fp_idx < 0))
> + ctx->ret0_fp_idx = i;
> + emit_mov_i(ARM_R0, k, ctx);
> +b_epilogue:
> + if (i != ctx->skf->len - 1)
> + emit(ARM_B(b_imm(prog->len, ctx)), ctx);
> + break;
> + case BPF_S_MISC_TAX:
> + /* X = A */
> + emit(ARM_MOV_R(r_X, r_A), ctx);
> + break;
TEST 37 ==========================================
BPF_STMT(BPF_MISC+BPF_TAX, 0),
BPF_STMT(BPF_RET+BPF_A, 0)
Code: e92d0010 e3a04000 e1a05004 e1a00004 e8bd0010 e12fff1e (0000000000)
(0) retinterp 0 retjit 0
+OK
qemu: uncaught target signal 11 (Segmentation fault) - core dumped
All code
========
0: e92d0010 push {r4}
4: e3a04000 mov r4, #0
8: e1a05004 mov r5, r4 <- clobber
c: e1a00004 mov r0, r4
10: e8bd0010 pop {r4}
14: e12fff1e bx lr
> + case BPF_S_MISC_TXA:
> + /* A = X */
> + emit(ARM_MOV_R(r_A, r_X), ctx);
> + break;
TEST 38 ==========================================
BPF_STMT(BPF_MISC+BPF_TXA, 0),
BPF_STMT(BPF_RET+BPF_A, 0)
Code: e92d0010 e3a04000 e1a04005 e1a00004 e8bd0010 e12fff1e (0000000000)
(0) retinterp 0 retjit 0
+OK
(1) retinterp 0 retjit 0x1
-ERROR different rets
All code
========
0: e92d0010 push {r4}
4: e3a04000 mov r4, #0
8: e1a04005 mov r4, r5 <- uninitialized
c: e1a00004 mov r0, r4
10: e8bd0010 pop {r4}
14: e12fff1e bx lr
...
> +void bpf_jit_compile(struct sk_filter *fp)
> +{
> + struct jit_ctx ctx;
> + unsigned tmp_idx;
> + unsigned alloc_size;
> +
> + if (!bpf_jit_enable)
> + return;
> +
> + memset(&ctx, 0, sizeof(ctx));
> + ctx.skf = fp;
> + ctx.ret0_fp_idx = -1;
> +
> + ctx.offsets = kzalloc(GFP_KERNEL, 4 * (ctx.skf->len + 1));
> + if (ctx.offsets == NULL)
> + return;
> +
> + /* fake pass to fill in the ctx->seen */
> + if (unlikely(build_body(&ctx)))
> + goto out;
> +
> + tmp_idx = ctx.idx;
> + build_prologue(&ctx);
> + ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
> +
> +#if __LINUX_ARM_ARCH__ < 7
> + tmp_idx = ctx.idx;
> + build_epilogue(&ctx);
> + ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
> +
> + ctx.idx += ctx.imm_count;
> + if (ctx.imm_count) {
> + ctx.imms = kzalloc(GFP_KERNEL, 4 * ctx.imm_count);
> + if (ctx.imms == NULL)
> + goto out;
> + }
> +#else
> + /* there's nothing after the epilogue on ARMv7 */
> + build_epilogue(&ctx);
> +#endif
> +
> + alloc_size = 4 * ctx.idx;
> + ctx.target = module_alloc(alloc_size > sizeof(struct work_struct) ?
> + alloc_size : sizeof(struct work_struct));
max()
More information about the linux-arm-kernel
mailing list