
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions for 64-bit CPUs.
 */

#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
/* MIPS t0-t3 are not available in the n64 ABI */

/* Stack is 16-byte aligned in n64 ABI */

/* Extra 64-bit eBPF registers used by JIT */

/* Callee-saved CPU registers that the JIT must preserve */

/* Caller-saved CPU registers available for JIT use */
/*
 * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers.
 * MIPS registers t4 - t7 may be used by the JIT as temporary registers.
 * MIPS registers t8 - t9 are reserved for single-register common functions.
 */
	/* Return value from in-kernel function, and exit value from eBPF */
	/* Arguments from eBPF program to in-kernel function */
	/* Callee-saved registers that in-kernel function will preserve */
	/* Read-only frame pointer to access the eBPF stack */
	/* Tail call count register, caller-saved */
	/* Constant for register zero-extension */
/*
 * MIPS 32-bit operations on 64-bit registers generate a sign-extended
 * result. However, the eBPF ISA mandates zero-extension, so we rely on the
 * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic
 * operations, right shift and byte swap require properly sign-extended
 * operands or the result is unpredictable. We emit explicit sign-extensions
 * before those operations.
 */
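/*
 * Illustrative user-space sketch, not part of this file: it shows the
 * mismatch described above. A 32-bit ALU result sign-extends into the
 * upper half of a 64-bit register on MIPS64, while eBPF mandates a
 * zero-extended result, hence the explicit zero-extension that the JIT
 * (or the verifier) must insert.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t word = -2;
	int64_t sign_extended = (int64_t)word;    /* what addu etc. leave behind */
	uint64_t zero_extended = (uint32_t)word;  /* what eBPF requires */

	printf("sign-extended: %016llx\n", (unsigned long long)sign_extended);
	printf("zero-extended: %016llx\n", (unsigned long long)zero_extended);
	return 0;
}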
/* in emit_zext_ver(): */
	if (!ctx->program->aux->verifier_zext)
/* dst = imm (64-bit) */
/* in emit_mov_i64(): */
	u16 half = imm64 >> (48 - 16 * k);
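/*
 * Sketch of the decomposition behind the expression above, as a
 * standalone function (illustrative, not the kernel helper): the
 * 64-bit immediate is split into four 16-bit halves, most significant
 * first, so it can be materialized with a lui/ori plus shift sequence.
 */
#include <assert.h>
#include <stdint.h>

static void split_imm64(uint64_t imm64, uint16_t half[4])
{
	for (int k = 0; k < 4; k++)
		half[k] = (uint16_t)(imm64 >> (48 - 16 * k));
}

static uint64_t join_halves(const uint16_t half[4])
{
	uint64_t v = 0;

	for (int k = 0; k < 4; k++)
		v = (v << 16) | half[k];	/* mirrors dsll + ori */
	return v;
}

int main(void)	/* usage example: split and reassemble round-trips */
{
	uint16_t half[4];

	split_imm64(0x0123456789abcdefULL, half);
	assert(join_halves(half) == 0x0123456789abcdefULL);
	return 0;
}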
/* ALU immediate operation (64-bit) */
/* in emit_alu_i64(): */
	/* dst = -dst */
	/* dst = dst - imm */
	emit(ctx, daddiu, dst, dst, -imm);
	/* Width-generic operations */
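/*
 * Illustrative sketch (plain C, not kernel code) of the trick behind
 * the daddiu above: MIPS has no subtract-immediate instruction, so
 * "dst = dst - imm" is encoded as an add of the negated immediate,
 * which is only valid while -imm still fits the signed 16-bit field.
 */
#include <stdbool.h>
#include <stdint.h>

static bool fits_simm16(int64_t v)
{
	return v >= -32768 && v <= 32767;
}

static int64_t sub_imm(int64_t dst, int32_t imm)
{
	if (fits_simm16(-(int64_t)imm))
		return dst + -(int64_t)imm;	/* daddiu dst, dst, -imm */
	/* otherwise: load imm into a temporary register and use dsubu */
	return dst - (int64_t)imm;
}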
/* ALU register operation (64-bit) */
/* in emit_alu_r64(): */
	/* dst = dst - src */
	/* Width-generic operations */
/* in emit_trunc_r64(): */
	/* Zero-extend a word */
	/* Zero-extend a half word */
/* Load operation: dst = *(size *)(src + off) */
static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
	emit(ctx, lbu, dst, off, src);	/* BPF_B */
	emit(ctx, lhu, dst, off, src);	/* BPF_H */
	emit(ctx, lwu, dst, off, src);	/* BPF_W */
	emit(ctx, ld, dst, off, src);	/* BPF_DW */
/* Store operation: *(size *)(dst + off) = src */
static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
	emit(ctx, sb, src, off, dst);	/* BPF_B */
	emit(ctx, sh, src, off, dst);	/* BPF_H */
	emit(ctx, sw, src, off, dst);	/* BPF_W */
	emit(ctx, sd, src, off, dst);	/* BPF_DW */
/* Atomic read-modify-write */
static void emit_atomic_r64(struct jit_context *ctx,
			    u8 dst, u8 src, s16 off, u8 code)
	emit(ctx, lld, t1, off, dst);	/* load-linked old value */
	emit(ctx, scd, t2, off, dst);	/* store-conditional new value */
	emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset);	/* branch back on failure */
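/*
 * The lld/scd pair above forms a load-linked/store-conditional retry
 * loop: scd succeeds only if no other CPU touched the doubleword, and
 * the branch retries on failure. A user-space analogue of the same
 * read-modify-write semantics, sketched with C11 atomics (illustrative
 * names, not kernel helpers):
 */
#include <stdatomic.h>
#include <stdint.h>

static uint64_t fetch_add_u64(_Atomic uint64_t *ptr, uint64_t val)
{
	uint64_t old = atomic_load_explicit(ptr, memory_order_relaxed);

	/* retry until the stored value is still the one we loaded,
	 * just as scd fails and loops back to lld */
	while (!atomic_compare_exchange_weak_explicit(ptr, &old, old + val,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;
	return old;
}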
/* Atomic compare-and-exchange */
static void emit_cmpxchg_r64(struct jit_context *ctx, u8 dst, u8 src, s16 off)
	emit(ctx, lld, t1, off, dst);	/* load-linked old value */
	emit(ctx, scd, t2, off, dst);	/* store-conditional src */
	emit(ctx, LLSC_beqz, t2, -20 - LLSC_offset);	/* branch back on failure */
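/*
 * Sketch of the semantics emit_cmpxchg_r64() implements: BPF_CMPXCHG
 * compares the value at (dst + off) with R0, stores src on a match,
 * and in all cases leaves the old value in R0. A single-threaded
 * plain-C model (the emitted code handles contention via scd retry):
 */
#include <stdint.h>

static uint64_t cmpxchg_model(uint64_t *addr, uint64_t expected,
			      uint64_t new_val)
{
	uint64_t old = *addr;		/* lld */

	if (old == expected)		/* compare against R0 */
		*addr = new_val;	/* scd, retried on failure */
	return old;			/* old value lands in R0 */
}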
/* Function call */
/* in emit_call(): */
	if (bpf_jit_get_func_addr(ctx->program, insn, false,
		return -1;
		return -1;

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

	/* Emit function call */

	/* Restore caller-saved registers */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

	/* Re-initialize the JIT zero-extension register if accessed */
	if (ctx->accessed & BIT(JIT_REG_ZX)) {
		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
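/*
 * The push_regs()/pop_regs() calls above walk a bitmask of clobbered
 * registers, saving and restoring only what the JITed code actually
 * uses. A minimal user-space model of such a mask walk (the function
 * name is illustrative, not the kernel helper):
 */
#include <stdint.h>
#include <stdio.h>

static void walk_reg_mask(uint32_t mask)
{
	for (int reg = 0; mask; reg++, mask >>= 1)
		if (mask & 1)
			printf("save/restore $%d\n", reg);
}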
/* Function tail call */
/* in emit_tail_call(): */
	int off;

	/*
	 * eBPF R1 - function argument (context ptr), passed in a0-a1
	 * eBPF R2 - ptr to object with array of function entry points
	 * eBPF R3 - array index of function to be called
	 */
	/* if (ind >= ary->map.max_entries) goto out */
	off = offsetof(struct bpf_array, map.max_entries);
	if (off > 0x7fff)
		return -1;
	emit(ctx, lwu, tmp, off, ary);		  /* tmp = ary->map.max_entries */
	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0 */

	/* if (--TCC < 0) goto out */
	emit(ctx, daddiu, tcc, tcc, -1);	  /* tcc-- (delay slot) */
	emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0 */
	/* prog = ary->ptrs[ind] */
	off = offsetof(struct bpf_array, ptrs);
	if (off > 0x7fff)
		return -1;
	emit(ctx, ld, tmp, off, tmp);		  /* tmp = *(tmp + off) */
	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0 */

	/* func = prog->bpf_func + 8 (prologue skip offset) */
	off = offsetof(struct bpf_prog, bpf_func);
	if (off > 0x7fff)
		return -1;
	emit(ctx, ld, tmp, off, tmp);		  /* tmp = *(tmp + off) */
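/*
 * Plain-C model of the guard sequence emitted above (illustrative,
 * mirroring the eBPF tail-call contract): bail out when the index is
 * out of bounds, when the tail-call count is exhausted, or when the
 * array slot is empty. Type names are stand-ins, not the kernel's.
 */
#include <stddef.h>
#include <stdint.h>

struct prog;			/* stand-in for struct bpf_prog */

struct prog_array {		/* stand-in for struct bpf_array */
	uint32_t max_entries;
	struct prog *ptrs[];
};

static struct prog *tail_call_target(struct prog_array *ary, uint32_t ind,
				     int *tcc)
{
	if (ind >= ary->max_entries)	/* lwu + beqz path */
		return NULL;
	if (--(*tcc) < 0)		/* daddiu + bltz path */
		return NULL;
	return ary->ptrs[ind];		/* a NULL slot also aborts the jump */
}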
/*
 *                 +===========================+ <--- MIPS sp before call
 *                 | Callee-saved registers,   |
 *                 | including RA and FP       |
 *                 +---------------------------+ <--- eBPF FP (MIPS fp)
 *                 | Local eBPF variables      |
 *                 | allocated by program      |
 *                 +---------------------------+
 *                 | Reserved for caller-saved |
 *                 | register arguments        |
 * Lower address   +===========================+ <--- MIPS sp
 */
/* in build_prologue(): */
	/* On a tail call, the calling function jumps into the prologue */
	/* === Entry-point for tail calls === */

	if (ctx->accessed & BIT(BPF_REG_FP))
	if (ctx->accessed & BIT(JIT_REG_TC))
	if (ctx->accessed & BIT(JIT_REG_ZX))
	/* Compute the stack space needed for callee-saved registers */
	saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64);

	locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);

	/*
	 * If we are emitting function calls, reserve extra stack space for
	 * caller-saved registers needed by the JIT. The required space is
	 * recorded in ctx->stack_used during the first pass.
	 */
	reserved = ctx->stack_used;

	emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack);
	/* Store callee-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);

	if (ctx->accessed & BIT(BPF_REG_FP))
		emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved);

	/* Initialize the eBPF JIT zero-extension register if accessed */
	if (ctx->accessed & BIT(JIT_REG_ZX)) {
		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);

	ctx->saved_size = saved;
	ctx->stack_size = stack;
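/*
 * Sketch of the frame-size arithmetic used above, under the 16-byte
 * n64 stack alignment stated earlier. Names and the final rounding
 * are illustrative assumptions, not copied from this file.
 */
#include <stdint.h>

#define STACK_ALIGN		16
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static uint32_t frame_size(uint32_t saved, uint32_t stack_depth,
			   uint32_t reserved)
{
	uint32_t locals = ALIGN_UP(stack_depth, STACK_ALIGN);

	/* the total frame must itself stay 16-byte aligned */
	return ALIGN_UP(saved + locals + reserved, STACK_ALIGN);
}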
/* in build_epilogue(): */
	/* Restore callee-saved registers from stack */
	pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
		 ctx->stack_size - ctx->saved_size);

	if (ctx->stack_size)
		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);

	/* Jump to return address and sign-extend the 32-bit return value */
/* in build_insn(): */
	u8 dst = bpf2mips64[insn->dst_reg];
	u8 src = bpf2mips64[insn->src_reg];
	u8 code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;
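/*
 * The fields read above drive a large switch on insn->code. For
 * reference, an eBPF opcode byte decomposes as in the standard BPF
 * encoding from the uapi headers:
 */
#define BPF_CLASS(code)	((code) & 0x07)	/* ALU, ALU64, LDX, STX, JMP... */
#define BPF_OP(code)	((code) & 0xf0)	/* ADD, SUB, MOV, JEQ... */
#define BPF_SRC(code)	((code) & 0x08)	/* BPF_K (immediate) or BPF_X (register) */
#define BPF_SIZE(code)	((code) & 0x18)	/* B, H, W, DW for loads and stores */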
	/* dst = -dst */
	/* dst = dst - imm */
	/* dst = dst - src */
	/* dst = imm (64-bit) */
	/* dst = src (64-bit) */
	/* dst = -dst (64-bit) */
	/* dst = dst & imm (64-bit) */
	/* dst = dst | imm (64-bit) */
	/* dst = dst ^ imm (64-bit) */
	/* dst = dst << imm (64-bit) */
	/* dst = dst >> imm (64-bit) */
	/* dst = dst >> imm (64-bit, arithmetic) */
	/* dst = dst + imm (64-bit) */
	/* dst = dst - imm (64-bit) */
	/* dst = dst * imm (64-bit) */
	/* dst = dst / imm (64-bit) */
	/* dst = dst % imm (64-bit) */
	/* dst = dst & src (64-bit) */
	/* dst = dst | src (64-bit) */
	/* dst = dst ^ src (64-bit) */
	/* dst = dst << src (64-bit) */
	/* dst = dst >> src (64-bit) */
	/* dst = dst >> src (64-bit, arithmetic) */
	/* dst = dst + src (64-bit) */
	/* dst = dst - src (64-bit) */
	/* dst = dst * src (64-bit) */
	/* dst = dst / src (64-bit) */
	/* dst = dst % src (64-bit) */
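/*
 * Illustrative mini-interpreter for a few of the 64-bit immediate
 * forms listed above, semantics only (the JIT emits native MIPS
 * instructions instead of executing anything like this). Opcode
 * values are the standard BPF_ADD/SUB/AND/LSH/RSH encodings.
 */
#include <stdint.h>

static uint64_t alu64_imm(uint8_t op, uint64_t dst, int32_t imm)
{
	switch (op) {
	case 0x00: return dst + (int64_t)imm;		/* BPF_ADD, imm sign-extended */
	case 0x10: return dst - (int64_t)imm;		/* BPF_SUB */
	case 0x50: return dst & (uint64_t)(int64_t)imm;	/* BPF_AND */
	case 0x60: return dst << (imm & 63);		/* BPF_LSH */
	case 0x70: return dst >> (imm & 63);		/* BPF_RSH, logical */
	default:   return dst;
	}
}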
	/* LDX: dst = *(size *)(src + off) */
		emit_ldx(ctx, dst, src, off, BPF_SIZE(code));

	/* ST: *(size *)(dst + off) = imm */
		emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code));

	/* STX: *(size *)(dst + off) = src */
		emit_stx(ctx, dst, src, off, BPF_SIZE(code));

		emit_atomic_r64(ctx, dst, src, off, imm);
			emit_atomic_r(ctx, tmp, src, off, imm);
		} else { /* 32-bit, no fetch */
			emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm);
		emit_cmpxchg_r64(ctx, dst, src, off);
		emit_cmpxchg_r(ctx, dst, MIPS_R_T5, tmp, off);
	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
		if (off == 0)
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
		emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */
		if (finish_jmp(ctx, jmp, off) < 0)
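/*
 * Why the two emit_sext() calls above matter: on a 64-bit machine, a
 * 32-bit comparison is only correct if both operands are sign-extended
 * first. A standalone demo (not kernel code):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = 0x00000000ffffffffULL;	/* low word reads as -1 */
	uint64_t src = 0x0000000000000001ULL;	/* low word reads as  1 */

	/* a naive 64-bit signed compare gets the 32-bit answer wrong */
	printf("64-bit: dst < src is %d\n", (int64_t)dst < (int64_t)src);
	/* sign-extend the low words first, as the JIT does */
	printf("32-bit: dst < src is %d\n",
	       (int64_t)(int32_t)dst < (int64_t)(int32_t)src);
	return 0;
}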
	/* PC += off if dst == imm */
	/* PC += off if dst != imm */
	/* PC += off if dst & imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
		if (off == 0)
		setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
		/* Move large immediate to register, sign-extended */
		if (finish_jmp(ctx, jmp, off) < 0)
	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
		if (off == 0)
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		if (finish_jmp(ctx, jmp, off) < 0)
	/* PC += off if dst == imm */
	/* PC += off if dst != imm */
	/* PC += off if dst & imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
		if (off == 0)
		setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
		if (finish_jmp(ctx, jmp, off) < 0)
	/* PC += off */
		if (off == 0)
		if (emit_ja(ctx, off) < 0)
	/* Function call */

	/* Function return */
		if (ctx->bpf_index == ctx->program->len - 1)

		return -EINVAL;
		return -EFAULT;
			ctx->bpf_index, code);
		return -E2BIG;