From mboxrd@z Thu Jan 1 00:00:00 1970
From: Qi Zhang 
To: qiming.yang@intel.com
Cc: dev@dpdk.org, xiaolong.ye@intel.com, Qi Zhang ,
	Dan Nowlin , Paul M Stillwell Jr 
Date: Mon, 23 Mar 2020 15:17:58 +0800
Message-Id: <20200323071759.13075-36-qi.z.zhang@intel.com>
X-Mailer: git-send-email 2.13.6
In-Reply-To: <20200323071759.13075-1-qi.z.zhang@intel.com>
References: <20200309114357.31800-1-qi.z.zhang@intel.com>
	<20200323071759.13075-1-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v2 35/36] net/ice/base: add reference count to tunnels

Add a lock to protect the tunnel table while adding, removing and
searching tunnels. Add reference counting to tunnels so that multiple
instances of the same tunnel port can be created. Only physically
destroy the tunnel when all instances of that tunnel have been
destroyed.
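For illustration only (not part of this patch), the expected reference-count
behavior is sketched below. It assumes an initialized struct ice_hw, uses
TNL_VXLAN and the standard VXLAN UDP port 4789 purely as example values,
largely omits error handling, and the helper name tnl_refcount_example is
hypothetical.

/* Illustrative sketch, not code from this patch: how the reference-counted
 * create/destroy paths are expected to behave for one tunnel port.
 */
static void tnl_refcount_example(struct ice_hw *hw)
{
	/* First create programs the boost TCAM entry and sets ref = 1. */
	if (ice_create_tunnel(hw, TNL_VXLAN, 4789) != ICE_SUCCESS)
		return;

	/* Creating the same port again only increments ref (ref = 2). */
	if (ice_create_tunnel(hw, TNL_VXLAN, 4789) != ICE_SUCCESS)
		return;

	/* First destroy only decrements ref (ref = 1); the tunnel stays in use. */
	ice_destroy_tunnel(hw, 4789, false);

	/* Last destroy (ref reaches 0) removes the entry from hardware. */
	ice_destroy_tunnel(hw, 4789, false);
}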
Signed-off-by: Dan Nowlin 
Signed-off-by: Paul M Stillwell Jr 
Signed-off-by: Qi Zhang 
---
 drivers/net/ice/base/ice_common.c    |  2 +
 drivers/net/ice/base/ice_flex_pipe.c | 95 ++++++++++++++++++++++++++++++------
 drivers/net/ice/base/ice_flex_type.h |  1 +
 drivers/net/ice/base/ice_type.h      |  1 +
 4 files changed, 83 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 3fdc93ce9..0cf578c34 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -728,6 +728,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	status = ice_init_hw_tbls(hw);
 	if (status)
 		goto err_unroll_fltr_mgmt_struct;
+	ice_init_lock(&hw->tnl_lock);
 	return ICE_SUCCESS;
 
 err_unroll_fltr_mgmt_struct:
@@ -759,6 +760,7 @@ void ice_deinit_hw(struct ice_hw *hw)
 	ice_sched_clear_agg(hw);
 	ice_free_seg(hw);
 	ice_free_hw_tbls(hw);
+	ice_destroy_lock(&hw->tnl_lock);
 
 	if (hw->port_info) {
 		ice_free(hw, hw->port_info);
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 213aceef5..de14d9d9d 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1883,7 +1883,7 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
 }
 
 /**
- * ice_tunnel_port_in_use
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
  * @hw: pointer to the HW structure
  * @port: port to search for
  * @index: optionally returns index
@@ -1891,7 +1891,7 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
  * Returns whether a port is already in use as a tunnel, and optionally its
  * index
  */
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
 {
 	u16 i;
 
@@ -1906,6 +1906,26 @@ bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
 }
 
 /**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+	bool res;
+
+	ice_acquire_lock(&hw->tnl_lock);
+	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+	ice_release_lock(&hw->tnl_lock);
+
+	return res;
+}
+
+/**
  * ice_tunnel_get_type
  * @hw: pointer to the HW structure
  * @port: port to search for
@@ -1916,15 +1936,21 @@ bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
 bool
 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
 {
+	bool res = false;
 	u16 i;
 
+	ice_acquire_lock(&hw->tnl_lock);
+
 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
 			*type = hw->tnl.tbl[i].type;
-			return true;
+			res = true;
+			break;
 		}
 
-	return false;
+	ice_release_lock(&hw->tnl_lock);
+
+	return res;
 }
 
 /**
@@ -1962,16 +1988,22 @@ bool
 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
			 u16 *port)
 {
+	bool res = false;
 	u16 i;
 
+	ice_acquire_lock(&hw->tnl_lock);
+
 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
 		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
 			*port = hw->tnl.tbl[i].port;
-			return true;
+			res = true;
+			break;
 		}
 
-	return false;
+	ice_release_lock(&hw->tnl_lock);
+
+	return res;
 }
 
 /**
@@ -1992,15 +2024,24 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
 	struct ice_buf_build *bld;
 	u16 index;
 
-	if (ice_tunnel_port_in_use(hw, port, NULL))
-		return ICE_ERR_ALREADY_EXISTS;
+	ice_acquire_lock(&hw->tnl_lock);
 
-	if (!ice_find_free_tunnel_entry(hw, type, &index))
-		return ICE_ERR_OUT_OF_RANGE;
+	if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+		hw->tnl.tbl[index].ref++;
+		status = ICE_SUCCESS;
+		goto ice_create_tunnel_end;
+	}
+
+	if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+		status = ICE_ERR_OUT_OF_RANGE;
+		goto ice_create_tunnel_end;
+	}
 
 	bld = ice_pkg_buf_alloc(hw);
-	if (!bld)
-		return ICE_ERR_NO_MEMORY;
+	if (!bld) {
+		status = ICE_ERR_NO_MEMORY;
+		goto ice_create_tunnel_end;
+	}
 
 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
 	if (ice_pkg_buf_reserve_section(bld, 2))
@@ -2040,11 +2081,15 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
 	if (!status) {
 		hw->tnl.tbl[index].port = port;
 		hw->tnl.tbl[index].in_use = true;
+		hw->tnl.tbl[index].ref = 1;
 	}
 
 ice_create_tunnel_err:
 	ice_pkg_buf_free(hw, bld);
 
+ice_create_tunnel_end:
+	ice_release_lock(&hw->tnl_lock);
+
 	return status;
 }
 
@@ -2064,24 +2109,38 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
 	enum ice_status status = ICE_ERR_MAX_LIMIT;
 	struct ice_buf_build *bld;
 	u16 count = 0;
+	u16 index;
 	u16 size;
 	u16 i;
 
+	ice_acquire_lock(&hw->tnl_lock);
+
+	if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+		if (hw->tnl.tbl[index].ref > 1) {
+			hw->tnl.tbl[index].ref--;
+			status = ICE_SUCCESS;
+			goto ice_destroy_tunnel_end;
+		}
+
 	/* determine count */
 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
 		    (all || hw->tnl.tbl[i].port == port))
 			count++;
 
-	if (!count)
-		return ICE_ERR_PARAM;
+	if (!count) {
+		status = ICE_ERR_PARAM;
+		goto ice_destroy_tunnel_end;
+	}
 
 	/* size of section - there is at least one entry */
 	size = ice_struct_size(sect_rx, tcam, count - 1);
 
 	bld = ice_pkg_buf_alloc(hw);
-	if (!bld)
-		return ICE_ERR_NO_MEMORY;
+	if (!bld) {
+		status = ICE_ERR_NO_MEMORY;
+		goto ice_destroy_tunnel_end;
+	}
 
 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
 	if (ice_pkg_buf_reserve_section(bld, 2))
@@ -2123,6 +2182,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
 		if (hw->tnl.tbl[i].marked) {
+			hw->tnl.tbl[i].ref = 0;
 			hw->tnl.tbl[i].port = 0;
 			hw->tnl.tbl[i].in_use = false;
 			hw->tnl.tbl[i].marked = false;
@@ -2131,6 +2191,9 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
 
 ice_destroy_tunnel_err:
 	ice_pkg_buf_free(hw, bld);
 
+ice_destroy_tunnel_end:
+	ice_release_lock(&hw->tnl_lock);
+
 	return status;
 }
 
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 2c5860887..4e4ba4deb 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -531,6 +531,7 @@ struct ice_tunnel_entry {
 	enum ice_tunnel_type type;
 	u16 boost_addr;
 	u16 port;
+	u16 ref;
 	struct ice_boost_tcam_entry *boost_entry;
 	u8 valid;
 	u8 in_use;
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 394867f35..da2b3548d 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -888,6 +888,7 @@ struct ice_hw {
 	u32 pkg_size;
 
 	/* tunneling info */
+	struct ice_lock tnl_lock;
 	struct ice_tunnel_table tnl;
 
 	struct ice_acl_tbl *acl_tbl;
-- 
2.13.6