Subject: [dpdk-dev] [PATCH 8/8] bpf/arm64: add branch operation
From: Jerin Jacob
Date: Tue, 3 Sep 2019 16:29:38 +0530
Message-ID: <20190903105938.33231-9-jerinj@marvell.com>
In-Reply-To: <20190903105938.33231-1-jerinj@marvell.com>
References: <20190903105938.33231-1-jerinj@marvell.com>

Add branch and call operations. The jump_offset_* APIs are used to find
the relative offset of a jump target with respect to the current eBPF
program counter (PC).
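For illustration only (this sketch is not part of the diff below), the idea
behind the jump_offset_* APIs is a two-pass translation: the first JIT pass
records, for every eBPF instruction, the index of the first A64 instruction
emitted for it, plus how many A64 instructions are emitted before the actual
branch; the second pass uses that map to turn an eBPF branch offset into an
A64 instruction delta. The map entry layout mirrors the patch; the standalone
helper name here is hypothetical.

	#include <stdint.h>

	struct offset_map {
		int32_t off;      /* A64 index of the first insn emitted for this eBPF insn */
		int32_t off_to_b; /* number of A64 insns emitted before the branch itself */
	};

	/* An eBPF branch at index 'from' with offset 'off' targets insn 'from + off + 1'. */
	static int32_t
	a64_branch_delta(const struct offset_map *map, uint32_t from, int16_t off)
	{
		int32_t a64_from = map[from].off + map[from].off_to_b;
		int32_t a64_to = map[from + off + 1].off;

		return a64_to - a64_from; /* meaningful only once the first pass has filled map[] */
	}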
Signed-off-by: Jerin Jacob
---
 lib/librte_bpf/bpf_jit_arm64.c | 229 +++++++++++++++++++++++++++++++++
 1 file changed, 229 insertions(+)

diff --git a/lib/librte_bpf/bpf_jit_arm64.c b/lib/librte_bpf/bpf_jit_arm64.c
index 62fa6a505..8882fee67 100644
--- a/lib/librte_bpf/bpf_jit_arm64.c
+++ b/lib/librte_bpf/bpf_jit_arm64.c
@@ -105,6 +105,112 @@ check_invalid_args(struct a64_jit_ctx *ctx, uint32_t limit)
 	return 0;
 }
 
+static int
+jump_offset_init(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
+{
+	uint32_t i;
+
+	ctx->map = malloc(bpf->prm.nb_ins * sizeof(ctx->map[0]));
+	if (ctx->map == NULL)
+		return -ENOMEM;
+
+	/* Fill with fake offsets */
+	for (i = 0; i != bpf->prm.nb_ins; i++) {
+		ctx->map[i].off = INT32_MAX;
+		ctx->map[i].off_to_b = 0;
+	}
+	return 0;
+}
+
+static void
+jump_offset_fini(struct a64_jit_ctx *ctx)
+{
+	free(ctx->map);
+}
+
+static void
+jump_offset_update(struct a64_jit_ctx *ctx, uint32_t ebpf_idx)
+{
+	if (is_first_pass(ctx))
+		ctx->map[ebpf_idx].off = ctx->idx;
+}
+
+static void
+jump_offset_to_branch_update(struct a64_jit_ctx *ctx, uint32_t ebpf_idx)
+{
+	if (is_first_pass(ctx))
+		ctx->map[ebpf_idx].off_to_b = ctx->idx - ctx->map[ebpf_idx].off;
+
+}
+
+static int32_t
+jump_offset_get(struct a64_jit_ctx *ctx, uint32_t from, int16_t offset)
+{
+	int32_t a64_from, a64_to;
+
+	a64_from = ctx->map[from].off + ctx->map[from].off_to_b;
+	a64_to = ctx->map[from + offset + 1].off;
+
+	if (a64_to == INT32_MAX)
+		return a64_to;
+
+	return a64_to - a64_from;
+}
+
+enum a64_cond_e {
+	A64_EQ = 0x0, /* == */
+	A64_NE = 0x1, /* != */
+	A64_CS = 0x2, /* Unsigned >= */
+	A64_CC = 0x3, /* Unsigned < */
+	A64_MI = 0x4, /* < 0 */
+	A64_PL = 0x5, /* >= 0 */
+	A64_VS = 0x6, /* Overflow */
+	A64_VC = 0x7, /* No overflow */
+	A64_HI = 0x8, /* Unsigned > */
+	A64_LS = 0x9, /* Unsigned <= */
+	A64_GE = 0xa, /* Signed >= */
+	A64_LT = 0xb, /* Signed < */
+	A64_GT = 0xc, /* Signed > */
+	A64_LE = 0xd, /* Signed <= */
+	A64_AL = 0xe, /* Always */
+};
+
+static int
+check_cond(uint8_t cond)
+{
+	return (cond >= A64_AL) ? 1 : 0;
+}
+
+static uint8_t
+ebpf_to_a64_cond(uint8_t op)
+{
+	switch (BPF_OP(op)) {
+	case BPF_JEQ:
+		return A64_EQ;
+	case BPF_JGT:
+		return A64_HI;
+	case EBPF_JLT:
+		return A64_CC;
+	case BPF_JGE:
+		return A64_CS;
+	case EBPF_JLE:
+		return A64_LS;
+	case BPF_JSET:
+	case EBPF_JNE:
+		return A64_NE;
+	case EBPF_JSGT:
+		return A64_GT;
+	case EBPF_JSLT:
+		return A64_LT;
+	case EBPF_JSGE:
+		return A64_GE;
+	case EBPF_JSLE:
+		return A64_LE;
+	default:
+		return UINT8_MAX;
+	}
+}
+
 /* Emit an instruction */
 static inline void
 emit_insn(struct a64_jit_ctx *ctx, uint32_t insn, int error)
@@ -525,6 +631,17 @@ emit_mod(struct a64_jit_ctx *ctx, bool is64, uint8_t tmp, uint8_t rd,
 	emit_msub(ctx, is64, rd, tmp, rm, rd);
 }
 
+static void
+emit_blr(struct a64_jit_ctx *ctx, uint8_t rn)
+{
+	uint32_t insn;
+
+	insn = 0xd63f0000;
+	insn |= rn << 5;
+
+	emit_insn(ctx, insn, check_reg(rn));
+}
+
 static void
 emit_zero_extend(struct a64_jit_ctx *ctx, uint8_t rd, int32_t imm)
 {
@@ -799,6 +916,16 @@ emit_epilogue(struct a64_jit_ctx *ctx)
 		emit_epilogue_no_call(ctx);
 }
 
+static void
+emit_call(struct a64_jit_ctx *ctx, uint8_t tmp, void *func)
+{
+	uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
+
+	emit_mov_imm(ctx, 1, tmp, (uint64_t)func);
+	emit_blr(ctx, tmp);
+	emit_mov_64(ctx, r0, A64_R(0));
+}
+
 static void
 emit_cbnz(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, int32_t imm19)
 {
@@ -914,6 +1041,54 @@ emit_xadd(struct a64_jit_ctx *ctx, uint8_t op, uint8_t tmp1, uint8_t tmp2,
 	}
 }
 
+#define A64_CMP 0x6b00000f
+#define A64_TST 0x6a00000f
+static void
+emit_cmp_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm,
+	     uint32_t opc)
+{
+	uint32_t insn;
+
+	insn = opc;
+	insn |= (!!is64) << 31;
+	insn |= rm << 16;
+	insn |= rn << 5;
+
+	emit_insn(ctx, insn, check_reg(rn) || check_reg(rm));
+}
+
+static void
+emit_cmp(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm)
+{
+	emit_cmp_tst(ctx, is64, rn, rm, A64_CMP);
+}
+
+static void
+emit_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm)
+{
+	emit_cmp_tst(ctx, is64, rn, rm, A64_TST);
+}
+
+static void
+emit_b_cond(struct a64_jit_ctx *ctx, uint8_t cond, int32_t imm19)
+{
+	uint32_t insn, imm;
+
+	imm = mask_imm(19, imm19);
+	insn = 0x15 << 26;
+	insn |= imm << 5;
+	insn |= cond;
+
+	emit_insn(ctx, insn, check_cond(cond) || check_imm(19, imm19));
+}
+
+static void
+emit_branch(struct a64_jit_ctx *ctx, uint8_t op, uint32_t i, int16_t off)
+{
+	jump_offset_to_branch_update(ctx, i);
+	emit_b_cond(ctx, ebpf_to_a64_cond(op), jump_offset_get(ctx, i, off));
+}
+
 static void
 check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
 {
@@ -961,6 +1136,7 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
 
 	for (i = 0; i != bpf->prm.nb_ins; i++) {
 
+		jump_offset_update(ctx, i);
 		ins = bpf->prm.ins + i;
 		op = ins->code;
 		off = ins->off;
@@ -1150,6 +1326,52 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
 		case (BPF_STX | EBPF_XADD | EBPF_DW):
 			emit_xadd(ctx, op, tmp1, tmp2, tmp3, dst, off, src);
 			break;
+		/* PC += off */
+		case (BPF_JMP | BPF_JA):
+			emit_b(ctx, jump_offset_get(ctx, i, off));
+			break;
+		/* PC += off if dst COND imm */
+		case (BPF_JMP | BPF_JEQ | BPF_K):
+		case (BPF_JMP | EBPF_JNE | BPF_K):
+		case (BPF_JMP | BPF_JGT | BPF_K):
+		case (BPF_JMP | EBPF_JLT | BPF_K):
+		case (BPF_JMP | BPF_JGE | BPF_K):
+		case (BPF_JMP | EBPF_JLE | BPF_K):
+		case (BPF_JMP | EBPF_JSGT | BPF_K):
+		case (BPF_JMP | EBPF_JSLT | BPF_K):
+		case (BPF_JMP | EBPF_JSGE | BPF_K):
+		case (BPF_JMP | EBPF_JSLE | BPF_K):
+			emit_mov_imm(ctx, 1, tmp1, imm);
+			emit_cmp(ctx, 1, dst, tmp1);
+			emit_branch(ctx, op, i, off);
+			break;
+		case (BPF_JMP | BPF_JSET | BPF_K):
+			emit_mov_imm(ctx, 1, tmp1, imm);
+			emit_tst(ctx, 1, dst, tmp1);
+			emit_branch(ctx, op, i, off);
+			break;
+		/* PC += off if dst COND src */
+		case (BPF_JMP | BPF_JEQ | BPF_X):
+		case (BPF_JMP | EBPF_JNE | BPF_X):
+		case (BPF_JMP | BPF_JGT | BPF_X):
+		case (BPF_JMP | EBPF_JLT | BPF_X):
+		case (BPF_JMP | BPF_JGE | BPF_X):
+		case (BPF_JMP | EBPF_JLE | BPF_X):
+		case (BPF_JMP | EBPF_JSGT | BPF_X):
+		case (BPF_JMP | EBPF_JSLT | BPF_X):
+		case (BPF_JMP | EBPF_JSGE | BPF_X):
+		case (BPF_JMP | EBPF_JSLE | BPF_X):
+			emit_cmp(ctx, 1, dst, src);
+			emit_branch(ctx, op, i, off);
+			break;
+		case (BPF_JMP | BPF_JSET | BPF_X):
+			emit_tst(ctx, 1, dst, src);
+			emit_branch(ctx, op, i, off);
+			break;
+		/* Call imm */
+		case (BPF_JMP | EBPF_CALL):
+			emit_call(ctx, tmp1, bpf->prm.xsym[ins->imm].func.val);
+			break;
 		/* Return r0 */
 		case (BPF_JMP | EBPF_EXIT):
 			emit_epilogue(ctx);
@@ -1179,6 +1401,11 @@ bpf_jit_arm64(struct rte_bpf *bpf)
 	/* Init JIT context */
 	memset(&ctx, 0, sizeof(ctx));
 
+	/* Initialize the memory for eBPF to a64 insn offset map for jump */
+	rc = jump_offset_init(&ctx, bpf);
+	if (rc)
+		goto error;
+
 	/* Find eBPF program has call class or not */
 	check_program_has_call(&ctx, bpf);
 
@@ -1218,5 +1445,7 @@ bpf_jit_arm64(struct rte_bpf *bpf)
 munmap:
 	munmap(ctx.ins, size);
 finish:
+	jump_offset_fini(&ctx);
+error:
 	return rc;
 }
-- 
2.23.0
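
For reference, a small standalone example (not part of the patch) of how the
B.cond word built in emit_b_cond() is composed: base opcode 0x54000000
(0x15 << 26), a 19-bit signed instruction offset in bits [23:5], and the
condition code in bits [3:0]. The helper name and sample values below are
illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t
	a64_b_cond(uint8_t cond, int32_t imm19)
	{
		uint32_t insn = 0x15 << 26;               /* B.cond base: 0x54000000 */

		insn |= ((uint32_t)imm19 & 0x7ffff) << 5; /* offset in 4-byte instructions */
		insn |= cond;                             /* e.g. A64_EQ == 0x0 */
		return insn;
	}

	int main(void)
	{
		/* "b.eq .+8": condition EQ, offset of two instructions. */
		printf("0x%08x\n", a64_b_cond(0x0, 2));   /* prints 0x54000040 */
		return 0;
	}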