From: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
To: dev@dpdk.org
Cc: kirill.rybalchenko@intel.com, andrey.chilikin@intel.com,
	thomas@monjalon.net, ferruh.yigit@intel.com
Date: Mon, 15 Jan 2018 16:58:20 +0000
Message-Id: <1516035500-6010-1-git-send-email-kirill.rybalchenko@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1511785787-127452-1-git-send-email-kirill.rybalchenko@intel.com>
References: <1511785787-127452-1-git-send-email-kirill.rybalchenko@intel.com>
Subject: [dpdk-dev] [PATCH v2] ethdev: increase flow type limit from 32 to 64

Increase the internal limit for flow types from 32 to 64
to support future flow type extensions.

Change the type of the following fields from uint32_t[] to uint64_t[]:
rte_eth_fdir_info.flow_types_mask
rte_eth_hash_global_conf.sym_hash_enable_mask
rte_eth_hash_global_conf.valid_bit_mask

This modification affects the following components:
net/i40e
net/ixgbe
app/testpmd

v2:
implement versioning of the rte_eth_dev_filter_ctrl() function
to keep ABI backward compatibility with version 17.11 and older

Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
---
 app/test-pmd/cmdline.c                  |  22 ++---
 drivers/net/i40e/i40e_ethdev.c          |  38 ++++----
 drivers/net/i40e/i40e_fdir.c            |  25 ++---
 drivers/net/ixgbe/ixgbe_fdir.c          |  22 +++--
 lib/librte_ether/rte_eth_ctrl.h         |  12 +--
 lib/librte_ether/rte_ethdev.c           | 157 +++++++++++++++++++++++++++++++-
 lib/librte_ether/rte_ethdev_version.map |   7 ++
 7 files changed, 224 insertions(+), 59 deletions(-)
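A few illustrative notes follow; they sit below the diffstat, so git-am
will not pick them up.

The masks are now arrays of 64-bit elements, so every bit probe has to use
a 1ULL constant: a plain (1 << i) shifts a 32-bit signed int and overflows
once a flow type value reaches 31. A minimal sketch of the indexing scheme
used throughout this patch (flow_type_is_set is a hypothetical helper, not
part of the patch; UINT64_BIT comes from rte_eth_ctrl.h below):

	#include <stdint.h>
	#include <rte_eth_ctrl.h>	/* UINT64_BIT, RTE_FLOW_MASK_ARRAY_SIZE */

	/* Test one flow type bit in a widened mask array. */
	static inline int
	flow_type_is_set(const uint64_t *mask, uint16_t flow_type)
	{
		return (mask[flow_type / UINT64_BIT] &
			(1ULL << (flow_type % UINT64_BIT))) != 0;
	}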
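The API surface is unchanged: applications keep calling
rte_eth_dev_filter_ctrl(), only the mask element width grows. A usage
sketch mirroring the testpmd change below (variable names are
illustrative; port setup and error handling omitted):

	/* needs <string.h> and <rte_ethdev.h> */
	struct rte_eth_hash_filter_info info;
	uint32_t ftype = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	uint32_t idx = ftype / UINT64_BIT;
	uint32_t offset = ftype % UINT64_BIT;
	int ret;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
	/* Mark the flow type as valid and enable symmetric hashing for it. */
	info.info.global_conf.valid_bit_mask[idx] |= 1ULL << offset;
	info.info.global_conf.sym_hash_enable_mask[idx] |= 1ULL << offset;
	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
				      RTE_ETH_FILTER_SET, &info);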
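The v2 compatibility shim follows the usual rte_compat.h pattern: binaries
built against 17.11 or older keep resolving the symbol at its old version
node and land in a thunk that converts between the 32-bit and 64-bit mask
layouts, while freshly linked binaries bind to the 18.02 implementation.
Schematically (my_func is a placeholder, not a symbol touched by this
patch):

	#include <rte_compat.h>

	int my_func_v22(int arg);	/* old ABI: converts struct layouts */
	int my_func_v1802(int arg);	/* new default implementation */

	/* Old binaries stay on the 2.2 version node. */
	VERSION_SYMBOL(my_func, _v22, 2.2);
	/* Newly linked binaries bind to the 18.02 node by default. */
	BIND_DEFAULT_SYMBOL(my_func, _v1802, 18.02);
	/* Static builds map the plain name straight to the new code. */
	MAP_STATIC_SYMBOL(int my_func(int arg), my_func_v1802);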
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 6d71a5f..964d4ed 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -10803,7 +10803,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
 	struct rte_eth_fdir_info fdir_info;
 	struct rte_eth_fdir_flex_mask flex_mask;
 	struct rte_port *port;
-	uint32_t flow_type_mask;
+	uint64_t flow_type_mask;
 	uint16_t i;
 	int ret;
 
@@ -10856,7 +10856,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
 		return;
 	}
 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
-		if (flow_type_mask & (1 << i)) {
+		if (flow_type_mask & (1ULL << i)) {
 			flex_mask.flow_type = i;
 			fdir_set_flex_mask(res->port_id, &flex_mask);
 		}
@@ -10865,7 +10865,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
 		return;
 	}
 	flex_mask.flow_type = str2flowtype(res->flow_type);
-	if (!(flow_type_mask & (1 << flex_mask.flow_type))) {
+	if (!(flow_type_mask & (1ULL << flex_mask.flow_type))) {
 		printf("Flow type %s not supported on port %d\n",
 				res->flow_type, res->port_id);
 		return;
@@ -11227,10 +11227,10 @@ cmd_get_hash_global_config_parsed(void *parsed_result,
 	}
 
 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
-		idx = i / UINT32_BIT;
-		offset = i % UINT32_BIT;
+		idx = i / UINT64_BIT;
+		offset = i % UINT64_BIT;
 		if (!(info.info.global_conf.valid_bit_mask[idx] &
-						(1UL << offset)))
+						(1ULL << offset)))
 			continue;
 		str = flowtype_to_str(i);
 		if (!str)
@@ -11238,7 +11238,7 @@ cmd_get_hash_global_config_parsed(void *parsed_result,
 		printf("Symmetric hash is %s globally for flow type %s "
 			"by port %d\n",
 			((info.info.global_conf.sym_hash_enable_mask[idx] &
-			(1UL << offset)) ? "enabled" : "disabled"), str,
+			(1ULL << offset)) ? "enabled" : "disabled"), str,
 			res->port_id);
 	}
 }
@@ -11299,12 +11299,12 @@ cmd_set_hash_global_config_parsed(void *parsed_result,
 				RTE_ETH_HASH_FUNCTION_DEFAULT;
 
 	ftype = str2flowtype(res->flow_type);
-	idx = ftype / (CHAR_BIT * sizeof(uint32_t));
-	offset = ftype % (CHAR_BIT * sizeof(uint32_t));
-	info.info.global_conf.valid_bit_mask[idx] |= (1UL << offset);
+	idx = ftype / UINT64_BIT;
+	offset = ftype % UINT64_BIT;
+	info.info.global_conf.valid_bit_mask[idx] |= (1ULL << offset);
 	if (!strcmp(res->enable, "enable"))
 		info.info.global_conf.sym_hash_enable_mask[idx] |=
-						(1UL << offset);
+						(1ULL << offset);
 	ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
 					RTE_ETH_FILTER_SET, &info);
 	if (ret < 0)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7796e9e..d0473f0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8134,14 +8134,17 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
 		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
 
 	/*
-	 * We work only with lowest 32 bits which is not correct, but to work
-	 * properly the valid_bit_mask size should be increased up to 64 bits
-	 * and this will brake ABI. This modification will be done in next
-	 * release
+	 * As i40e supports fewer than 64 flow types, only the first 64 bits
+	 * need to be checked.
 	 */
-	g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+	for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+		g_cfg->valid_bit_mask[i] = 0ULL;
+		g_cfg->sym_hash_enable_mask[i] = 0ULL;
+	}
 
-	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+	g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
+
+	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
 		if (!adapter->pctypes_tbl[i])
 			continue;
 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
@@ -8150,7 +8153,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
 			reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
 			if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
 				g_cfg->sym_hash_enable_mask[0] |=
-							(1UL << i);
+							(1ULL << i);
 			}
 		}
 	}
@@ -8164,7 +8167,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
 			      const struct rte_eth_hash_global_conf *g_cfg)
 {
 	uint32_t i;
-	uint32_t mask0, i40e_mask = adapter->flow_types_mask;
+	uint64_t mask0, i40e_mask = adapter->flow_types_mask;
 
 	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
@@ -8175,7 +8178,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
 	}
 
 	/*
-	 * As i40e supports less than 32 flow types, only first 32 bits need to
+	 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
 	 * be checked.
 	 */
 	mask0 = g_cfg->valid_bit_mask[0];
@@ -8211,23 +8214,20 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
 	int ret;
 	uint16_t i, j;
 	uint32_t reg;
-	/*
-	 * We work only with lowest 32 bits which is not correct, but to work
-	 * properly the valid_bit_mask size should be increased up to 64 bits
-	 * and this will brake ABI. This modification will be done in next
-	 * release
-	 */
-	uint32_t mask0 = g_cfg->valid_bit_mask[0] &
-					(uint32_t)adapter->flow_types_mask;
+	uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
 
 	/* Check the input parameters */
 	ret = i40e_hash_global_config_check(adapter, g_cfg);
 	if (ret < 0)
 		return ret;
 
-	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+	/*
+	 * As i40e supports fewer than 64 flow types, only the first 64 bits
+	 * need to be configured.
+	 */
+	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
 		if (mask0 & (1UL << i)) {
-			reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+			reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
 				I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
 
 			for (j = I40E_FILTER_PCTYPE_INVALID + 1;
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 906c204..3a9e656 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -66,17 +66,17 @@
 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
 
 #define I40E_FDIR_FLOWS ( \
-	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
-	(1 << RTE_ETH_FLOW_L2_PAYLOAD))
+	(1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
 
 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			enum i40e_filter_pctype pctype,
@@ -1999,6 +1999,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	uint16_t num_flex_set = 0;
 	uint16_t num_flex_mask = 0;
+	uint16_t i;
 
 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
 		fdir->mode = RTE_FDIR_MODE_PERFECT;
@@ -2011,6 +2012,8 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 			(uint32_t)hw->func_caps.fd_filters_best_effort;
 	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
 	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+		fdir->flow_types_mask[i] = 0ULL;
 	fdir->flex_payload_unit = sizeof(uint16_t);
 	fdir->flex_bitmask_unit = sizeof(uint16_t);
 	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 551580c..236ab8c 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -41,14 +41,14 @@
 #define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
 
 #define IXGBE_FDIR_FLOW_TYPES ( \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
 
 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
 	uint8_t ipv6_addr[16]; \
@@ -1407,7 +1407,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_fdir_info *info =
 			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	uint32_t fdirctrl, max_num;
+	uint32_t fdirctrl, max_num, i;
 	uint8_t offset;
 
 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
@@ -1439,9 +1439,11 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
 	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
-		fdir_info->flow_types_mask[0] = 0;
+		fdir_info->flow_types_mask[0] = 0ULL;
 	else
 		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+		fdir_info->flow_types_mask[i] = 0ULL;
 
 	fdir_info->flex_payload_unit = sizeof(uint16_t);
 	fdir_info->max_flex_payload_segment_num = 1;
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
index 7991efa..668f59a 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ether/rte_eth_ctrl.h
@@ -662,9 +662,9 @@ enum rte_fdir_mode {
 	RTE_FDIR_MODE_PERFECT_TUNNEL, /**< Enable FDIR filter mode - tunnel. */
 };
 
-#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
+#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
 #define RTE_FLOW_MASK_ARRAY_SIZE \
-	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
 
 /**
  * A structure used to get the information of flow director filter.
@@ -681,7 +681,7 @@ struct rte_eth_fdir_info {
 	uint32_t guarant_spc; /**< Guaranteed spaces.*/
 	uint32_t best_spc; /**< Best effort spaces.*/
 	/** Bit mask for every supported flow type. */
-	uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
+	uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
 	uint32_t max_flexpayload; /**< Total flex payload in bytes. */
 	/** Flexible payload unit in bytes. Size and alignments of all flex
	    payload segments should be multiplies of this value. */
@@ -774,7 +774,7 @@ enum rte_eth_hash_function {
 };
 
 #define RTE_SYM_HASH_MASK_ARRAY_SIZE \
-	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+	(RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
 /**
  * A structure used to set or get global hash function configurations which
  * include symmetric hash enable per flow type and hash function type.
@@ -787,9 +787,9 @@ enum rte_eth_hash_function {
 struct rte_eth_hash_global_conf {
 	enum rte_eth_hash_function hash_func; /**< Hash function type */
 	/** Bit mask for symmetric hash enable per flow type */
-	uint32_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+	uint64_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
 	/** Bit mask indicates if the corresponding bit is valid */
-	uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+	uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
 };
 
 /**
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index b349599..d30eabc 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -34,6 +34,7 @@
 #include <rte_errno.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_compat.h>
 
 #include "rte_ether.h"
 #include "rte_ethdev.h"
@@ -3148,8 +3149,154 @@ rte_eth_dev_filter_supported(uint16_t port_id,
 }
 
 int
-rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
-			enum rte_filter_op filter_op, void *arg)
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+			    enum rte_filter_type filter_type,
+			    enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+			    enum rte_filter_type filter_type,
+			    enum rte_filter_op filter_op, void *arg)
+{
+	struct rte_eth_fdir_info_v22 {
+		enum rte_fdir_mode mode;
+		struct rte_eth_fdir_masks mask;
+		struct rte_eth_fdir_flex_conf flex_conf;
+		uint32_t guarant_spc;
+		uint32_t best_spc;
+		uint32_t flow_types_mask[1];
+		uint32_t max_flexpayload;
+		uint32_t flex_payload_unit;
+		uint32_t max_flex_payload_segment_num;
+		uint16_t flex_payload_limit;
+		uint32_t flex_bitmask_unit;
+		uint32_t max_flex_bitmask_num;
+	};
+
+	struct rte_eth_hash_global_conf_v22 {
+		enum rte_eth_hash_function hash_func;
+		uint32_t sym_hash_enable_mask[1];
+		uint32_t valid_bit_mask[1];
+	};
+
+	struct rte_eth_hash_filter_info_v22 {
+		enum rte_eth_hash_filter_info_type info_type;
+		union {
+			uint8_t enable;
+			struct rte_eth_hash_global_conf_v22 global_conf;
+			struct rte_eth_input_set_conf input_set_conf;
+		} info;
+	};
+
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+	dev = &rte_eth_devices[port_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+	if (filter_op == RTE_ETH_FILTER_INFO) {
+		int retval;
+		struct rte_eth_fdir_info_v22 *fdir_info_v22;
+		struct rte_eth_fdir_info fdir_info;
+
+		fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
+
+		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			  filter_op, (void *)&fdir_info);
+		fdir_info_v22->mode = fdir_info.mode;
+		fdir_info_v22->mask = fdir_info.mask;
+		fdir_info_v22->flex_conf = fdir_info.flex_conf;
+		fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
+		fdir_info_v22->best_spc = fdir_info.best_spc;
+		fdir_info_v22->flow_types_mask[0] =
+			(uint32_t)fdir_info.flow_types_mask[0];
+		fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
+		fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
+		fdir_info_v22->max_flex_payload_segment_num =
+			fdir_info.max_flex_payload_segment_num;
+		fdir_info_v22->flex_payload_limit =
+			fdir_info.flex_payload_limit;
+		fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
+		fdir_info_v22->max_flex_bitmask_num =
+			fdir_info.max_flex_bitmask_num;
+		return retval;
+	} else if (filter_op == RTE_ETH_FILTER_GET) {
+		int retval;
+		struct rte_eth_hash_filter_info filter_info;
+		struct rte_eth_hash_filter_info_v22 *filter_info_v22 =
+			(struct rte_eth_hash_filter_info_v22 *)arg;
+
+		filter_info.info_type = filter_info_v22->info_type;
+		retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			  filter_op, (void *)&filter_info);
+
+		switch (filter_info_v22->info_type) {
+		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+			filter_info_v22->info.enable = filter_info.info.enable;
+			break;
+		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+			filter_info_v22->info.global_conf.hash_func =
+				filter_info.info.global_conf.hash_func;
+			filter_info_v22->
+				info.global_conf.sym_hash_enable_mask[0] =
+				(uint32_t)filter_info.info.
+				global_conf.sym_hash_enable_mask[0];
+			filter_info_v22->info.global_conf.valid_bit_mask[0] =
+				(uint32_t)filter_info.info.global_conf.
+				valid_bit_mask[0];
+			break;
+		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+			filter_info_v22->info.input_set_conf =
+				filter_info.info.input_set_conf;
+			break;
+		default:
+			break;
+		}
+		return retval;
+	} else if (filter_op == RTE_ETH_FILTER_SET) {
+		struct rte_eth_hash_filter_info filter_info;
+		struct rte_eth_hash_filter_info_v22 *filter_info_v22 =
+			(struct rte_eth_hash_filter_info_v22 *)arg;
+
+		filter_info.info_type = filter_info_v22->info_type;
+		switch (filter_info_v22->info_type) {
+		case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+			filter_info.info.enable = filter_info_v22->info.enable;
+			break;
+		case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+			filter_info.info.global_conf.hash_func =
+				filter_info_v22->info.global_conf.hash_func;
+			filter_info.info.global_conf.sym_hash_enable_mask[0] =
+				(uint32_t)filter_info_v22->
+				info.global_conf.sym_hash_enable_mask[0];
+			filter_info.info.global_conf.valid_bit_mask[0] =
+				(uint32_t)filter_info_v22->
+				info.global_conf.valid_bit_mask[0];
+			break;
+		case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+			filter_info.info.input_set_conf =
+				filter_info_v22->info.input_set_conf;
+			break;
+		default:
+			break;
+		}
+		return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			filter_op, (void *)&filter_info);
+	} else
+		return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+			filter_op, arg);
+}
+VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+			      enum rte_filter_type filter_type,
+			      enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+			      enum rte_filter_type filter_type,
+			      enum rte_filter_op filter_op, void *arg)
 {
 	struct rte_eth_dev *dev;
 
@@ -3159,6 +3306,12 @@ rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
 	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
 }
+BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
+MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
+		enum rte_filter_type filter_type,
+		enum rte_filter_op filter_op, void *arg),
+		rte_eth_dev_filter_ctrl_v1802);
 
 void *
 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
diff --git a/lib/librte_ether/rte_ethdev_version.map b/lib/librte_ether/rte_ethdev_version.map
index e9681ac..2906a18 100644
--- a/lib/librte_ether/rte_ethdev_version.map
+++ b/lib/librte_ether/rte_ethdev_version.map
@@ -198,6 +198,13 @@ DPDK_17.11 {
 
 } DPDK_17.08;
 
+DPDK_18.02 {
+	global:
+
+	rte_eth_dev_filter_ctrl;
+
+} DPDK_17.11;
+
 EXPERIMENTAL {
 	global:
 
-- 
2.5.5