DPDK patches and discussions
 help / color / mirror / Atom feed
From: Bing Zhao <bingz@nvidia.com>
To: <viacheslavo@nvidia.com>, <matan@nvidia.com>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>, <thomas@monjalon.net>,
	<orika@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 1/2] net/mlx5: add support for Rx queue delay drop
Date: Thu, 4 Nov 2021 16:01:53 +0200	[thread overview]
Message-ID: <20211104140154.51122-2-bingz@nvidia.com> (raw)
In-Reply-To: <20211104140154.51122-1-bingz@nvidia.com>

For an Ethernet RQ, packets received when receive WQEs are exhausted
are dropped. This behavior prevents slow or malicious software
entities at the host from affecting the network. While for hairpin
cases, even if there is no software involved during the packet
forwarding from Rx to Tx side, some hiccup in the hardware or back
pressure from Tx side may still cause the WQEs to be exhausted. In
certain scenarios it may be preferred to configure the device to
avoid such packet drops, assuming the posting of WQEs will resume
shortly.

To support this, a new devarg "delay_drop_en" is introduced. By
default, the delay drop is enabled for hairpin Rx queues and
disabled for standard Rx queues. This value is used as a bit mask:
  - bit 0: enablement of standard Rx queues
  - bit 1: enablement of hairpin Rx queues
And this attribute will be applied to all Rx queues of a device.

The "rq_delay_drop" capability in the HCA_CAP is checked before
creating any queue. If the hardware capabilities do not support
this delay drop, all the Rx queues will still be created without
this attribute, and the devarg setting will be ignored even if it
is specified explicitly.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c |  1 +
 drivers/common/mlx5/mlx5_devx_cmds.h |  1 +
 drivers/net/mlx5/linux/mlx5_os.c     | 11 +++++++++++
 drivers/net/mlx5/mlx5.c              |  7 +++++++
 drivers/net/mlx5/mlx5.h              |  9 +++++++++
 drivers/net/mlx5/mlx5_devx.c         |  5 +++++
 drivers/net/mlx5/mlx5_rx.h           |  1 +
 7 files changed, 35 insertions(+)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 12c114a91b..eaf1dd5046 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -962,6 +962,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 	attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
 					 general_obj_types) &
 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
+	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
 	if (attr->qos.sup) {
 		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
 				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 2326f1e968..25e2814ac0 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -176,6 +176,7 @@ struct mlx5_hca_attr {
 	uint32_t swp_csum:1;
 	uint32_t swp_lso:1;
 	uint32_t lro_max_msg_sz_mode:2;
+	uint32_t rq_delay_drop:1;
 	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
 	uint16_t lro_min_mss_size;
 	uint32_t flex_parser_protocols;
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index f51da8c3a3..e8894239ed 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1506,6 +1506,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 #endif
 	}
+	if (config->std_delay_drop || config->hp_delay_drop) {
+		if (!config->hca_attr.rq_delay_drop) {
+			config->std_delay_drop = 0;
+			config->hp_delay_drop = 0;
+			DRV_LOG(WARNING,
+				"dev_port-%u: Rxq delay drop is not supported",
+				priv->dev_port);
+		}
+	}
 	if (sh->devx) {
 		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
 
@@ -2075,6 +2084,8 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
 	config->decap_en = 1;
 	config->log_hp_size = MLX5_ARG_UNSET;
 	config->allow_duplicate_pattern = 1;
+	config->std_delay_drop = 0;
+	config->hp_delay_drop = 0;
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index dc15688f21..80a6692b94 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -183,6 +183,9 @@
 /* Device parameter to configure implicit registration of mempool memory. */
 #define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
 
+/* Device parameter to configure the delay drop when creating Rxqs. */
+#define MLX5_DELAY_DROP_EN "delay_drop_en"
+
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
@@ -2095,6 +2098,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 		config->decap_en = !!tmp;
 	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
 		config->allow_duplicate_pattern = !!tmp;
+	} else if (strcmp(MLX5_DELAY_DROP_EN, key) == 0) {
+		config->std_delay_drop = tmp & MLX5_DELAY_DROP_STANDARD;
+		config->hp_delay_drop = tmp & MLX5_DELAY_DROP_HAIRPIN;
 	} else {
 		DRV_LOG(WARNING, "%s: unknown parameter", key);
 		rte_errno = EINVAL;
@@ -2157,6 +2163,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 		MLX5_DECAP_EN,
 		MLX5_ALLOW_DUPLICATE_PATTERN,
 		MLX5_MR_MEMPOOL_REG_EN,
+		MLX5_DELAY_DROP_EN,
 		NULL,
 	};
 	struct rte_kvargs *kvlist;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 74af88ec19..8d32d55c9a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -99,6 +99,13 @@ enum mlx5_flow_type {
 	MLX5_FLOW_TYPE_MAXI,
 };
 
+/* The mode of delay drop for Rx queues. */
+enum mlx5_delay_drop_mode {
+	MLX5_DELAY_DROP_NONE = 0, /* All disabled. */
+	MLX5_DELAY_DROP_STANDARD = RTE_BIT32(0), /* Standard queues enable. */
+	MLX5_DELAY_DROP_HAIRPIN = RTE_BIT32(1), /* Hairpin queues enable. */
+};
+
 /* Hlist and list callback context. */
 struct mlx5_flow_cb_ctx {
 	struct rte_eth_dev *dev;
@@ -264,6 +271,8 @@ struct mlx5_dev_config {
 	unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
 	unsigned int allow_duplicate_pattern:1;
 	/* Allow/Prevent the duplicate rules pattern. */
+	unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
+	unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
 	struct {
 		unsigned int enabled:1; /* Whether MPRQ is enabled. */
 		unsigned int stride_num_n; /* Number of strides. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 424f77be79..2e1d849eab 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -280,6 +280,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
 						MLX5_WQ_END_PAD_MODE_NONE;
 	rq_attr.wq_attr.pd = cdev->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
+	rq_attr.delay_drop_en = rxq_data->delay_drop;
 	/* Create RQ using DevX API. */
 	return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
 				   log_desc_n, &rq_attr, rxq_ctrl->socket);
@@ -443,6 +444,8 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
 	attr.counter_set_id = priv->counter_set_id;
+	rxq_data->delay_drop = priv->config.hp_delay_drop;
+	attr.delay_drop_en = priv->config.hp_delay_drop;
 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
@@ -503,6 +506,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
+	rxq_data->delay_drop = priv->config.std_delay_drop;
 	/* Create RQ using DevX API. */
 	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
 	if (ret) {
@@ -921,6 +925,7 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 	rxq_ctrl->priv = priv;
 	rxq_ctrl->obj = rxq;
 	rxq_data = &rxq_ctrl->rxq;
+	rxq_data->delay_drop = 0;
 	/* Create CQ using DevX API. */
 	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
 	if (ret != 0) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 69b1263339..05807764b8 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -92,6 +92,7 @@ struct mlx5_rxq_data {
 	unsigned int lro:1; /* Enable LRO. */
 	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
 	unsigned int mcqe_format:3; /* CQE compression format. */
+	unsigned int delay_drop:1; /* Enable delay drop. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
-- 
2.27.0


  reply	other threads:[~2021-11-04 14:02 UTC|newest]

Thread overview: 29+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-04 11:26 [dpdk-dev] [PATCH 0/4] Add delay drop support for Rx queue Bing Zhao
2021-11-04 11:26 ` [dpdk-dev] [PATCH 1/4] common/mlx5: support delay drop capabilities query Bing Zhao
2021-11-04 11:26 ` [dpdk-dev] [PATCH 2/4] net/mlx5: add support for Rx queue delay drop Bing Zhao
2021-11-04 14:01   ` David Marchand
2021-11-04 14:34     ` Bing Zhao
2021-11-04 11:26 ` [dpdk-dev] [PATCH 3/4] net/mlx5: support querying delay drop status via ethtool Bing Zhao
2021-11-04 11:26 ` [dpdk-dev] [PATCH 4/4] doc: update the description for Rx delay drop Bing Zhao
2021-11-04 14:01 ` [dpdk-dev] [PATCH v2 0/2] Add delay drop support for Rx queue Bing Zhao
2021-11-04 14:01   ` Bing Zhao [this message]
2021-11-04 14:01   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: check delay drop settings in kernel driver Bing Zhao
2021-11-04 16:55 ` [dpdk-dev] [PATCH v3 0/2] Add delay drop support for Rx queue Bing Zhao
2021-11-04 16:55   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: add support for Rx queue delay drop Bing Zhao
2021-11-04 16:55   ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: check delay drop settings in kernel driver Bing Zhao
2021-11-04 17:59 ` [dpdk-dev] [PATCH v4 0/2] Add delay drop support for Rx queue Bing Zhao
2021-11-04 17:59   ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: add support for Rx queue delay drop Bing Zhao
2021-11-04 18:22     ` Slava Ovsiienko
2021-11-04 17:59   ` [dpdk-dev] [PATCH v4 2/2] net/mlx5: check delay drop settings in kernel driver Bing Zhao
2021-11-04 18:22     ` Slava Ovsiienko
2021-11-04 21:46   ` [dpdk-dev] [PATCH v4 0/2] Add delay drop support for Rx queue Raslan Darawsheh
2021-11-05 13:36 ` [dpdk-dev] [PATCH v5 " Bing Zhao
2021-11-05 13:36   ` [dpdk-dev] [PATCH v5 1/2] net/mlx5: add support for Rx queue delay drop Bing Zhao
2021-11-05 13:36   ` [dpdk-dev] [PATCH v5 2/2] net/mlx5: check delay drop settings in kernel driver Bing Zhao
2021-11-05 14:28 ` [dpdk-dev] [PATCH v6 0/2] Add delay drop support for Rx queue Bing Zhao
2021-11-05 14:28   ` [dpdk-dev] [PATCH v6 1/2] net/mlx5: add support for Rx queue delay drop Bing Zhao
2021-11-05 14:28   ` [dpdk-dev] [PATCH v6 2/2] net/mlx5: check delay drop settings in kernel driver Bing Zhao
2021-11-05 15:30 ` [dpdk-dev] [PATCH v7 0/2] Add delay drop support for Rx queue Bing Zhao
2021-11-05 15:30   ` [dpdk-dev] [PATCH v7 1/2] net/mlx5: add support for Rx queue delay drop Bing Zhao
2021-11-05 15:30   ` [dpdk-dev] [PATCH v7 2/2] net/mlx5: check delay drop settings in kernel driver Bing Zhao
2021-11-05 16:07   ` [dpdk-dev] [PATCH v7 0/2] Add delay drop support for Rx queue Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211104140154.51122-2-bingz@nvidia.com \
    --to=bingz@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=matan@nvidia.com \
    --cc=orika@nvidia.com \
    --cc=rasland@nvidia.com \
    --cc=thomas@monjalon.net \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).