DPDK patches and discussions
From: Ying Wang <ying.a.wang@intel.com>
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, qiming.yang@intel.com, ying.a.wang@intel.com,
	stable@dpdk.org
Subject: [dpdk-dev] [PATCH] net/ice: cleanup RSS/FDIR profile when device init
Date: Sat, 28 Sep 2019 02:32:58 +0800	[thread overview]
Message-ID: <20190927183258.44578-1-ying.a.wang@intel.com> (raw)

The patch cleans up RSS/FDIR profile resources at device initialization
to fix the profile ID increase issue: stale profiles left in hardware by
a previous run are released so that profile IDs do not keep growing
across restarts.

Fixes: d7d150b93070 ("net/ice: enable RSS when device init")
Cc: stable@dpdk.org

Signed-off-by: Ying Wang <ying.a.wang@intel.com>
---
This patch depends on the following patches in patchwork:
(1) http://patchwork.dpdk.org/project/dpdk/list/?series=6557
(2) http://patches.dpdk.org/project/dpdk/list/?series=6579
(3) http://patches.dpdk.org/project/dpdk/list/?series=6577
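
Note for reviewers (a summary, not part of the diff): the cleanup path
added here is ice_dev_init() -> ice_reset_fxp_resource() ->
ice_cleanup_resource() for the FDIR and RSS profile builder resource
types -> ice_get_hw_res() -> ice_aq_get_res_profs(), a thin wrapper
around the get_allocd_res_desc admin queue command.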
---

 drivers/net/ice/ice_ethdev.c | 135 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 135 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 3abdaffbc..04102728f 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -13,6 +13,7 @@
 #include "base/ice_sched.h"
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
+#include "base/ice_common.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
 #include "ice_generic_flow.h"
@@ -40,6 +41,7 @@ static const char * const ice_valid_args[] = {
 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
 #define ICE_MAX_PKG_FILENAME_SIZE   256
+#define ICE_MAX_RES_DESC_NUM        1024
 
 int ice_logtype_init;
 int ice_logtype_driver;
@@ -1851,6 +1853,132 @@ ice_vsi_config_sw_lldp(struct ice_vsi *vsi,  bool on)
 	return ret;
 }
 
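+/* Query the AQ for the descriptors of allocated resources of the given
+ * type: a thin wrapper around the get_allocd_res_desc admin queue command.
+ * On success, *desc_id advances to the next descriptor to query and
+ * *num_prof holds the number of descriptors returned in resp_buf.
+ */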
+static enum ice_status
+ice_aq_get_res_profs(struct ice_hw *hw, uint16_t num_entries,
+		struct ice_aqc_get_allocd_res_desc_resp *resp_buf,
+		uint16_t buf_size, uint16_t res_type,
+		bool res_shared, uint16_t *desc_id,
+		struct ice_sq_cd *cd, uint16_t *num_prof)
+{
+	struct ice_aq_desc aq_desc;
+	enum ice_status ret;
+
+	struct ice_aqc_get_allocd_res_desc *cmd =
+			&aq_desc.params.get_res_desc;
+
+	if (!resp_buf)
+		return ICE_ERR_PARAM;
+
+	if (buf_size != (num_entries * sizeof(*resp_buf)))
+		return ICE_ERR_PARAM;
+
+	ice_fill_dflt_direct_cmd_desc(&aq_desc,
+			ice_aqc_opc_get_allocd_res_desc);
+
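+	/* Encode the resource type and the shared flag into the command word. */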
+	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
+				ICE_AQC_RES_TYPE_M) | (res_shared ?
+				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
+	cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
+	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_size, cd);
+	if (!ret) {
+		*desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
+		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
+	}
+	return ret;
+}
+
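+/* Read up to 'num' allocated descriptor IDs of a shared resource type
+ * into prof_buf, using a temporary response buffer sized for 'num'
+ * entries.
+ */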
+static enum ice_status
+ice_get_hw_res(struct ice_hw *hw, uint16_t type, uint16_t num, uint16_t *res,
+		uint16_t *prof_buf, uint16_t *num_prof)
+{
+	struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
+	enum ice_status ret;
+	uint16_t buf_len;
+
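+	/* The response struct declares a single elem; append room for the
+	 * remaining (num - 1) entries.
+	 */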
+	buf_len = sizeof(*resp_buf) + sizeof(resp_buf->elem) * (num - 1);
+	resp_buf = (struct ice_aqc_get_allocd_res_desc_resp *)
+		ice_malloc(hw, buf_len);
+	if (!resp_buf)
+		return ICE_ERR_NO_MEMORY;
+
+	ret = ice_aq_get_res_profs(hw, num, resp_buf, buf_len, type, 1,
+			res, NULL, num_prof);
+	if (ret)
+		goto exit;
+
+	ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
+			(*num_prof), ICE_NONDMA_TO_NONDMA);
+exit:
+	rte_free(resp_buf);
+	return ret;
+}
+
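+/* Free every allocated profile descriptor of the given resource type.
+ * Profiles left behind by a previous driver instance would otherwise
+ * stay allocated and make newly created profile IDs keep increasing.
+ */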
+static int
+ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
+{
+	int ret;
+	uint16_t prof_id;
+	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
+	uint16_t first_desc = 1;
+	uint16_t num_prof = 0;
+
+	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
+			&first_desc, prof_buf, &num_prof);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
+		return ret;
+	}
+
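+	/* Free each returned profile descriptor individually. */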
+	for (prof_id = 0; prof_id < num_prof; prof_id++) {
+		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
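+/* Reset FXP state: drop stale FDIR and RSS (hash) profiles left in HW. */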
+static int
+ice_reset_fxp_resource(struct ice_hw *hw)
+{
+	int ret;
+
+	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to cleanup fdir resource");
+		return ret;
+	}
+
+	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to cleanup rss resource");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1983,6 +2111,13 @@ ice_dev_init(struct rte_eth_dev *dev)
 		return ret;
 	}
 
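+	/* Release profiles possibly left over from a previous run. */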
+	ret = ice_reset_fxp_resource(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
+		return ret;
+	}
+
 	return 0;
 
 err_pf_setup:
-- 
2.15.1


Thread overview: 5+ messages
2019-09-27 18:32 Ying Wang [this message]
2019-09-30  9:25 ` Ye Xiaolong
2019-10-18  0:42   ` Wang, Ying A
2019-10-17 18:31 ` [dpdk-dev] [PATCH v2] " Ying Wang
2019-10-18  6:57   ` Ye Xiaolong
