[PATCH bpf-next 2/2] arm64, bpf: support more atomic operations

Hou Tao <houtao1 at huawei.com>
Thu Jan 27 00:10:33 PST 2022


Hi,

On 1/27/2022 2:06 PM, John Fastabend wrote:
> Hou Tao wrote:
>> The atomics for eBPF patch series added support for atomic[64]_fetch_add,
>> atomic[64]_[fetch_]{and,or,xor} and atomic[64]_{xchg,cmpxchg}, but only
>> for x86-64, so add support for these atomic operations to arm64 as well.
>>
>> +static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
>> +{
>> +	const u8 code = insn->code;
>> +	const u8 dst = bpf2a64[insn->dst_reg];
>> +	const u8 src = bpf2a64[insn->src_reg];
>> +	const u8 tmp = bpf2a64[TMP_REG_1];
>> +	const u8 tmp2 = bpf2a64[TMP_REG_2];
>> +	const bool isdw = BPF_SIZE(code) == BPF_DW;
>> +	const s16 off = insn->off;
>> +	u8 reg;
>> +
>> +	if (!off) {
>> +		reg = dst;
>> +	} else {
>> +		emit_a64_mov_i(1, tmp, off, ctx);
>> +		emit(A64_ADD(1, tmp, tmp, dst), ctx);
>> +		reg = tmp;
>> +	}
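A note for context: the LSE atomic instructions accept only a base
register, with no offset addressing mode, so a non-zero insn->off has to
be added into a temporary register first. For a hypothetical off of 8 and
a 64-bit BPF_ADD, the emitted sequence looks roughly like:

	mov	x10, #0x8		// tmp = off
	add	x10, x10, x7		// tmp += dst_reg
	stadd	x8, [x10]		// *(u64 *)(dst_reg + 8) += src_reg

(register numbers made up for illustration).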
>> +
>> +	switch (insn->imm) {
> Diffing the x86 implementation, which has a BPF_SUB case, how is it
> avoided here?
I think it is just left over from patchset [1]: according to LLVM commit
[2], __sync_fetch_and_sub(&addr, value) is implemented as
__sync_fetch_and_add(&addr, -value), so the compiler never generates a
BPF_SUB atomic instruction. I will post a patch to remove it.

[1]: https://lore.kernel.org/bpf/20210114181751.768687-1-jackmanb@google.com/
[2]: https://reviews.llvm.org/D72184
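To illustrate, a minimal sketch of that lowering (the function name and
flags here are illustrative, not taken from the patch):

	/* compiled with clang -O2 -target bpf -mcpu=v3 */
	long fetch_sub(long *addr, long val)
	{
		/* There is no BPF_SUB atomic for LLVM to emit, so this
		 * becomes atomic_fetch_add(addr, -val), i.e.
		 * BPF_ADD | BPF_FETCH with a negated operand.
		 */
		return __sync_fetch_and_sub(addr, val);
	}

So the JIT never sees a BPF_SUB atomic and needs no case for it.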
>> +	/* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
>> +	case BPF_ADD:
>> +		emit(A64_STADD(isdw, reg, src), ctx);
>> +		break;
>> +	case BPF_AND:
>> +		emit(A64_MVN(isdw, tmp2, src), ctx);
>> +		emit(A64_STCLR(isdw, reg, tmp2), ctx);
>> +		break;
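The MVN deserves a note, since it is not obvious at first glance: LSE has
no atomic AND, only the bit-clear operations STCLR/LDCLR, which
atomically perform *addr &= ~operand. Inverting the source first
therefore yields the intended AND:

	*addr &= ~(~src_reg);	/* equivalent to *addr &= src_reg */

The same trick appears in the BPF_AND | BPF_FETCH case below.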
>> +	case BPF_OR:
>> +		emit(A64_STSET(isdw, reg, src), ctx);
>> +		break;
>> +	case BPF_XOR:
>> +		emit(A64_STEOR(isdw, reg, src), ctx);
>> +		break;
>> +	/* src_reg = atomic_fetch_add(dst_reg + off, src_reg) */
>> +	case BPF_ADD | BPF_FETCH:
>> +		emit(A64_LDADDAL(isdw, src, reg, src), ctx);
>> +		break;
>> +	case BPF_AND | BPF_FETCH:
>> +		emit(A64_MVN(isdw, tmp2, src), ctx);
>> +		emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx);
>> +		break;
>> +	case BPF_OR | BPF_FETCH:
>> +		emit(A64_LDSETAL(isdw, src, reg, src), ctx);
>> +		break;
>> +	case BPF_XOR | BPF_FETCH:
>> +		emit(A64_LDEORAL(isdw, src, reg, src), ctx);
>> +		break;
>> +	/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
>> +	case BPF_XCHG:
>> +		emit(A64_SWPAL(isdw, src, reg, src), ctx);
>> +		break;
>> +	/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
>> +	case BPF_CMPXCHG:
>> +		emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx);
>> +		break;
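For BPF_CMPXCHG, arm64's CASAL lines up directly with the BPF contract:
the value at the address is compared against R0, src_reg is stored on a
match, and R0 always receives the old memory value. In pseudo-C, the
whole of the following happens atomically:

	old = *addr;
	if (old == r0)
		*addr = src_reg;
	r0 = old;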
>> +	default:
>> +		pr_err_once("unknown atomic op code %02x\n", insn->imm);
>> +		return -EINVAL;
> I was about to suggest EFAULT to align with x86, but on second thought
> the arm64 JIT seems to use EINVAL more universally, so it is best to be
> self-consistent. Just an observation.
OK. So I will keep returning -EINVAL for an invalid atomic operation.
>
>> +	}
>> +
>> +	return 0;
>> +}
>> +
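For completeness, the BPF-side constructs that exercise these paths look
roughly like the sketch below (names are illustrative; compile with
clang -O2 -target bpf -mcpu=v3 so the fetch/xchg/cmpxchg forms are
emitted):

	static long v;

	static int touch_all(void)
	{
		long old;

		__sync_fetch_and_add(&v, 1);	/* result unused: BPF_ADD -> stadd */
		__sync_fetch_and_or(&v, 4);	/* result unused: BPF_OR  -> stset */
		old = __sync_fetch_and_xor(&v, 2);	/* BPF_XOR | BPF_FETCH -> ldeoral */
		old = __sync_lock_test_and_set(&v, 8);	/* BPF_XCHG -> swpal */
		old = __sync_val_compare_and_swap(&v, 8, 16);	/* BPF_CMPXCHG -> casal */

		return old != 0;
	}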