From mboxrd@z Thu Jan  1 00:00:00 1970
From: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
To: dev@dpdk.org
Date: Wed, 26 Aug 2020 16:14:32 +0100
Message-Id: <20200826151445.51500-28-cristian.dumitrescu@intel.com>
In-Reply-To: <20200826151445.51500-1-cristian.dumitrescu@intel.com>
References: <20200826151445.51500-1-cristian.dumitrescu@intel.com>
Subject: [dpdk-dev] [PATCH 27/40] pipeline: add instruction optimizer
List-Id: DPDK patches and discussions
Sender: "dev" <dev-bounces@dpdk.org>

Add the instruction optimizer. It detects frequent instruction patterns and replaces them with more powerful vector-like pipeline instructions, with no user effort required. The optimization takes place at instruction translation time, not at run-time.
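For illustration, the optimizer is essentially a peephole fusion pass over the translated instruction array. The snippet that follows is a minimal, self-contained sketch of that idea using made-up toy opcodes and a toy instruction struct (not the pipeline's real instruction encoding): it collapses every run of two or more extract instructions into a single "extract many" instruction and compacts the array in place.

#include <stdint.h>
#include <stdio.h>

/* Toy opcodes standing in for the pipeline instruction types. */
enum opcode {OP_EXTRACT, OP_EXTRACT_MANY, OP_MOV, OP_TX};

struct toy_instr {
	enum opcode op;
	uint32_t n_fused; /* Number of headers handled by this instruction. */
};

/* Fuse each run of two or more OP_EXTRACT into one OP_EXTRACT_MANY,
 * compacting the array in place. Returns the new instruction count.
 */
static uint32_t
toy_fuse_extract(struct toy_instr *instr, uint32_t n)
{
	uint32_t pos = 0, i = 0;

	while (i < n) {
		uint32_t run = 0;

		/* Measure the run of consecutive extract instructions. */
		while (i + run < n && instr[i + run].op == OP_EXTRACT)
			run++;

		if (run >= 2) {
			instr[pos].op = OP_EXTRACT_MANY;
			instr[pos].n_fused = run;
			pos++;
			i += run;
			continue;
		}

		/* No pattern starting here: keep the instruction as is. */
		instr[pos++] = instr[i++];
	}

	return pos;
}

int main(void)
{
	struct toy_instr prog[] = {
		{OP_EXTRACT, 1}, {OP_EXTRACT, 1}, {OP_EXTRACT, 1},
		{OP_MOV, 0}, {OP_TX, 0},
	};
	uint32_t n = toy_fuse_extract(prog, 5);

	printf("%u instructions after fusion\n", (unsigned int)n); /* 3 */
	return 0;
}

The real pass below works on the same principle, but instead of compacting immediately it bumps the fused instruction's type once per merged element, copies the per-header fields into the vector slots, marks the merged slots invalid, and removes them in a final compaction loop.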
Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 lib/librte_pipeline/rte_swx_pipeline.c | 226 +++++++++++++++++++++++++
 1 file changed, 226 insertions(+)

diff --git a/lib/librte_pipeline/rte_swx_pipeline.c b/lib/librte_pipeline/rte_swx_pipeline.c
index 0016e0f0a..d5c2a9c5d 100644
--- a/lib/librte_pipeline/rte_swx_pipeline.c
+++ b/lib/librte_pipeline/rte_swx_pipeline.c
@@ -5700,6 +5700,230 @@ instr_verify(struct rte_swx_pipeline *p __rte_unused,
 	return 0;
 }
 
+static int
+instr_pattern_extract_many_detect(struct instruction *instr,
+				  struct instruction_data *data,
+				  uint32_t n_instr,
+				  uint32_t *n_pattern_instr)
+{
+	uint32_t i;
+
+	for (i = 0; i < n_instr; i++) {
+		if (data[i].invalid)
+			break;
+
+		if (instr[i].type != INSTR_HDR_EXTRACT)
+			break;
+
+		if (i == RTE_DIM(instr->io.hdr.header_id))
+			break;
+
+		if (i && data[i].n_users)
+			break;
+	}
+
+	if (i < 2)
+		return 0;
+
+	*n_pattern_instr = i;
+	return 1;
+}
+
+static void
+instr_pattern_extract_many_optimize(struct instruction *instr,
+				    struct instruction_data *data,
+				    uint32_t n_instr)
+{
+	uint32_t i;
+
+	for (i = 1; i < n_instr; i++) {
+		instr[0].type++;
+		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
+		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
+		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
+
+		data[i].invalid = 1;
+	}
+}
+
+static int
+instr_pattern_emit_many_tx_detect(struct instruction *instr,
+				  struct instruction_data *data,
+				  uint32_t n_instr,
+				  uint32_t *n_pattern_instr)
+{
+	uint32_t i;
+
+	for (i = 0; i < n_instr; i++) {
+		if (data[i].invalid)
+			break;
+
+		if (instr[i].type != INSTR_HDR_EMIT)
+			break;
+
+		if (i == RTE_DIM(instr->io.hdr.header_id))
+			break;
+
+		if (i && data[i].n_users)
+			break;
+	}
+
+	if (!i)
+		return 0;
+
+	if (instr[i].type != INSTR_TX)
+		return 0;
+
+	i++;
+
+	*n_pattern_instr = i;
+	return 1;
+}
+
+static void
+instr_pattern_emit_many_tx_optimize(struct instruction *instr,
+				    struct instruction_data *data,
+				    uint32_t n_instr)
+{
+	uint32_t i;
+
+	/* Any emit instruction in addition to the first one. */
+	for (i = 1; i < n_instr - 1; i++) {
+		instr[0].type++;
+		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
+		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
+		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];
+
+		data[i].invalid = 1;
+	}
+
+	/* The TX instruction is the last one in the pattern. */
+	instr[0].type++;
+	instr[0].io.io.offset = instr[i].io.io.offset;
+	instr[0].io.io.n_bits = instr[i].io.io.n_bits;
+	data[i].invalid = 1;
+}
+
+static int
+instr_pattern_dma_many_detect(struct instruction *instr,
+			      struct instruction_data *data,
+			      uint32_t n_instr,
+			      uint32_t *n_pattern_instr)
+{
+	uint32_t i;
+
+	for (i = 0; i < n_instr; i++) {
+		if (data[i].invalid)
+			break;
+
+		if (instr[i].type != INSTR_DMA_HT)
+			break;
+
+		if (i == RTE_DIM(instr->dma.dst.header_id))
+			break;
+
+		if (i && data[i].n_users)
+			break;
+	}
+
+	if (i < 2)
+		return 0;
+
+	*n_pattern_instr = i;
+	return 1;
+}
+
+static void
+instr_pattern_dma_many_optimize(struct instruction *instr,
+				struct instruction_data *data,
+				uint32_t n_instr)
+{
+	uint32_t i;
+
+	for (i = 1; i < n_instr; i++) {
+		instr[0].type++;
+		instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
+		instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
+		instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
+		instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];
+
+		data[i].invalid = 1;
+	}
+}
+
+static uint32_t
+instr_optimize(struct instruction *instructions,
+	       struct instruction_data *instruction_data,
+	       uint32_t n_instructions)
+{
+	uint32_t i, pos = 0;
+
+	for (i = 0; i < n_instructions; ) {
+		struct instruction *instr = &instructions[i];
+		struct instruction_data *data = &instruction_data[i];
+		uint32_t n_instr = 0;
+		int detected;
+
+		/* Extract many. */
+		detected = instr_pattern_extract_many_detect(instr,
+							     data,
+							     n_instructions - i,
+							     &n_instr);
+		if (detected) {
+			instr_pattern_extract_many_optimize(instr,
+							    data,
+							    n_instr);
+			i += n_instr;
+			continue;
+		}
+
+		/* Emit many + TX. */
+		detected = instr_pattern_emit_many_tx_detect(instr,
+							     data,
+							     n_instructions - i,
+							     &n_instr);
+		if (detected) {
+			instr_pattern_emit_many_tx_optimize(instr,
+							    data,
+							    n_instr);
+			i += n_instr;
+			continue;
+		}
+
+		/* DMA many. */
+		detected = instr_pattern_dma_many_detect(instr,
+							 data,
+							 n_instructions - i,
+							 &n_instr);
+		if (detected) {
+			instr_pattern_dma_many_optimize(instr, data, n_instr);
+			i += n_instr;
+			continue;
+		}
+
+		/* No pattern starting at the current instruction. */
+		i++;
+	}
+
+	/* Eliminate the invalid instructions that have been optimized out. */
+	for (i = 0; i < n_instructions; i++) {
+		struct instruction *instr = &instructions[i];
+		struct instruction_data *data = &instruction_data[i];
+
+		if (data->invalid)
+			continue;
+
+		if (i != pos) {
+			memcpy(&instructions[pos], instr, sizeof(*instr));
+			memcpy(&instruction_data[pos], data, sizeof(*data));
+		}
+
+		pos++;
+	}
+
+	return pos;
+}
+
 static int
 instruction_config(struct rte_swx_pipeline *p,
 		   struct action *a,
@@ -5752,6 +5976,8 @@ instruction_config(struct rte_swx_pipeline *p,
 	if (err)
 		goto error;
 
+	n_instructions = instr_optimize(instr, data, n_instructions);
+
 	err = instr_jmp_resolve(instr, data, n_instructions);
 	if (err)
 		goto error;
-- 
2.17.1