From: "Zhang, Yuying" <yuying.zhang@intel.com>
To: yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Subject: [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destruction
Date: Fri, 8 Sep 2023 16:05:49 +0000
Message-Id: <20230908160552.148060-7-yuying.zhang@intel.com>
In-Reply-To: <20230908160552.148060-1-yuying.zhang@intel.com>
References: <20230928084458.2333663-1-yuying.zhang@intel.com> <20230908160552.148060-1-yuying.zhang@intel.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

From: Yuying Zhang <yuying.zhang@intel.com>

Add a new module that implements FXP rule creation and destruction.
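For context, a minimal usage sketch of the new API (illustrative only, not
part of this patch; the cookie, profile id and control queue handles below
are placeholders that later patches in the series fill in from parsed
rte_flow rules):

    #include "cpfl_ethdev.h"
    #include "cpfl_fxp_rule.h"

    /* Pack one SEM rule, post it on the TX control queue, poll the RX
     * control queue for the completion, then destroy the same rule.
     */
    static int
    example_sem_rule(struct cpfl_itf *itf, struct idpf_ctlq_info *tx_cq,
                     struct idpf_ctlq_info *rx_cq)
    {
            struct cpfl_rule_info rinfo = {0};
            int ret;

            rinfo.type = CPFL_RULE_TYPE_SEM;
            rinfo.cookie = 0x1234;          /* placeholder cookie */
            rinfo.sem.prof_id = 1;          /* placeholder profile id */
            /* rinfo.sem.key / rinfo.act_bytes filled from the parsed flow */

            ret = cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
            if (ret)
                    return ret;

            return cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, false);
    }
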
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 263 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 ++++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 369 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..762fbddfe6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR "representor"
 #define CPFL_TX_SINGLE_Q "tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE	490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..ea65e20507
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		/* TODO - process rx controlq message */
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
             'cpfl_flow.c',
             'cpfl_flow_parser.c',
+            'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1