From: Yuying Zhang <yuying.zhang@intel.com>
To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com
Cc: Yuying Zhang <yuying.zhang@intel.com>
Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module
Date: Sat, 12 Aug 2023 07:55:05 +0000
Message-Id: <20230812075506.361769-5-yuying.zhang@intel.com>
In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com>
References: <20230812075506.361769-1-yuying.zhang@intel.com>

Add a low-level FXP rule module for rule packing, creation and destruction.
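
The module packs a rule described by struct cpfl_rule_info into a control
queue message, sends it on a TX config control queue, and then polls the
paired RX control queue for the completion status. As an illustration only
(the control-queue handles and the field values below are placeholders, not
part of this patch), a caller would drive the new API roughly as follows:

	/* Hypothetical caller sketch: pack and add one SEM rule, then wait
	 * for its completion.  tx_cq/rx_cq stand in for whichever config
	 * control queues the driver has set up; the rinfo values are
	 * examples only.
	 */
	static int
	example_add_sem_rule(struct cpfl_itf *itf, struct idpf_ctlq_info *tx_cq,
			     struct idpf_ctlq_info *rx_cq)
	{
		struct cpfl_rule_info rinfo = {0};

		rinfo.type = CPFL_RULE_TYPE_SEM;
		rinfo.cookie = 0x1234;		/* example cookie */
		rinfo.sem.prof_id = 1;		/* example profile id */
		rinfo.sem.key_byte_len = 16;	/* example key length */
		/* rinfo.sem.key[] and rinfo.act_bytes[]/act_byte_len would be
		 * filled from the parsed flow pattern and actions.
		 */

		/* pack, send and poll for completion of a single rule */
		return cpfl_rule_update(itf, tx_cq, rx_cq, &rinfo, 1, true);
	}
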
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h   |   4 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  87 ++++++++++
 drivers/net/cpfl/meson.build     |   1 +
 4 files changed, 380 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index c71f16ac60..63bcc5551f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -145,10 +145,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE	490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..936f57e4fa
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	uint16_t clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(uint16_t num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t i;
+	int ret = 0;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		switch (q_msg[i].status) {
+		case CPFL_CFG_PKT_ERR_OK:
+			continue;
+		case CPFL_CFG_PKT_ERR_EEXIST:
+			PMD_INIT_LOG(ERR, "The rule conflicts with an already existing one");
+			return -EINVAL;
+		case CPFL_CFG_PKT_ERR_ENOTFND:
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		default:
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	uint16_t i;
+	uint16_t buff_cnt;
+	int ret = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
+			break;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static int
+pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	      struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+			     struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		     struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (pack_default_rule(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (pack_mod_rule(rinfo, dma, msg) < 0)
+			ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_update(struct cpfl_itf *itf,
+		 struct idpf_ctlq_info *tx_cq,
+		 struct idpf_ctlq_info *rx_cq,
+		 struct cpfl_rule_info *rinfo,
+		 int rule_num,
+		 bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not create rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send rule");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..68efa8e3f8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+struct cpfl_lem_rule_info {
+	uint16_t prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_LEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+		struct cpfl_lem_rule_info lem;
+	};
+};
+
+struct cpfl_meter_action_info {
+	uint8_t meter_logic_bank_id;
+	uint32_t meter_logic_idx;
+	uint8_t prof_id;
+	uint8_t slot;
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_update(struct cpfl_itf *itf,
+		     struct idpf_ctlq_info *tx_cq,
+		     struct idpf_ctlq_info *rx_cq,
+		     struct cpfl_rule_info *rinfo,
+		     int rule_num,
+		     bool add);
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		   struct idpf_ctlq_msg q_msg[]);
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		      struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 222497f7c2..4061123034 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if js_dep.found()
             'cpfl_flow_parser.c',
             'cpfl_rules.c',
             'cpfl_controlq.c',
+            'cpfl_fxp_rule.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.25.1