* [PATCH] net/mana: suppress TX CQE generation whenever possible
@ 2023-04-28 3:36 longli
2023-05-06 1:32 ` [PATCH v2] " longli
0 siblings, 1 reply; 7+ messages in thread
From: longli @ 2023-04-28 3:36 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: dev, Ajay Sharma, Long Li, stable
From: Long Li <longli@microsoft.com>
When sending TX packets, we don't need a completion for every packet sent.
If packets are sent in a series, the completion of the last packet can be
used to indicate completion of all prior packets.
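The idea can be pictured with a small standalone sketch (illustration only, not the driver code; the names sketch_txq, post_burst and handle_one_cqe are made up): only the last WQE of a burst requests a CQE, and that single completion retires all of the suppressed descriptors queued before it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct sketch_desc {
    int pkt_id;            /* stand-in for the rte_mbuf pointer */
    bool suppress_tx_cqe;  /* this WQE did not request its own CQE */
};

struct sketch_txq {
    struct sketch_desc ring[RING_SIZE];
    uint32_t head, tail, len;
};

/* Post a burst of n packets; only the final WQE asks for a CQE. */
static void post_burst(struct sketch_txq *q, int first_pkt, int n)
{
    for (int i = 0; i < n && q->len < RING_SIZE; i++) {
        struct sketch_desc *d = &q->ring[q->head];

        d->pkt_id = first_pkt + i;
        d->suppress_tx_cqe = (i + 1 < n);  /* suppress all but the last */
        q->head = (q->head + 1) % RING_SIZE;
        q->len++;
    }
}

/* One CQE retires every suppressed descriptor ahead of it plus the
 * descriptor that actually generated it.
 */
static void handle_one_cqe(struct sketch_txq *q)
{
    while (q->len > 0) {
        struct sketch_desc *d = &q->ring[q->tail];

        printf("retiring pkt %d\n", d->pkt_id);
        q->tail = (q->tail + 1) % RING_SIZE;
        q->len--;
        if (!d->suppress_tx_cqe)
            break;  /* this WQE owned the CQE; stop draining */
    }
}

int main(void)
{
    struct sketch_txq q = { .len = 0 };

    post_burst(&q, 0, 4);  /* pkts 0-2 suppressed, pkt 3 gets the CQE */
    handle_one_cqe(&q);    /* retires pkts 0, 1, 2 and 3 */
    return 0;
}

Running the sketch shows all four packets being retired after a single completion, which is the behaviour the patch enables on the TX path.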
Signed-off-by: Long Li <longli@microsoft.com>
Cc: stable@dpdk.org
---
drivers/net/mana/mana.h | 3 ++-
drivers/net/mana/tx.c | 33 ++++++++++++++++++++++++++++++---
2 files changed, 32 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index c79d39daa2..f280d66f6e 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -353,6 +353,7 @@ struct mana_priv {
struct mana_txq_desc {
struct rte_mbuf *pkt;
uint32_t wqe_size_in_bu;
+ bool suppress_tx_cqe;
};
struct mana_rxq_desc {
@@ -401,7 +402,7 @@ struct mana_txq {
/* desc_ring_head is where we put pending requests to ring,
* completion pull off desc_ring_tail
*/
- uint32_t desc_ring_head, desc_ring_tail;
+ uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;
struct mana_mr_btree mr_btree;
struct mana_stats stats;
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index ee0319c71d..c8d3911f85 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -43,9 +43,11 @@ mana_stop_tx_queues(struct rte_eth_dev *dev)
txq->desc_ring_tail =
(txq->desc_ring_tail + 1) % txq->num_desc;
+ txq->desc_ring_len--;
}
txq->desc_ring_head = 0;
txq->desc_ring_tail = 0;
+ txq->desc_ring_len = 0;
memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq));
memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq));
@@ -173,13 +175,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int ret;
void *db_page;
uint16_t pkt_sent = 0;
- uint32_t num_comp;
+ uint32_t num_comp, i;
/* Process send completions from GDMA */
num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
txq->gdma_comp_buf, txq->num_desc);
- for (uint32_t i = 0; i < num_comp; i++) {
+ i = 0;
+ while (i < num_comp) {
struct mana_txq_desc *desc =
&txq->desc_ring[txq->desc_ring_tail];
struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *)
@@ -204,7 +207,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
desc->pkt = NULL;
txq->desc_ring_tail = (txq->desc_ring_tail + 1) % txq->num_desc;
+ txq->desc_ring_len--;
txq->gdma_sq.tail += desc->wqe_size_in_bu;
+
+ /* If TX CQE suppression was used for this WQE, don't consume
+ * another CQE; move on to the next packet
+ */
+ if (desc->suppress_tx_cqe)
+ continue;
+
+ i++;
}
/* Post send requests to GDMA */
@@ -215,6 +227,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct one_sgl sgl;
uint16_t seg_idx;
+ if (txq->desc_ring_len >= txq->num_desc)
+ break;
+
/* Drop the packet if it exceeds max segments */
if (m_pkt->nb_segs > priv->max_send_sge) {
DRV_LOG(ERR, "send packet segments %d exceeding max",
@@ -310,7 +325,6 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
tx_oob.short_oob.tx_compute_UDP_checksum = 0;
}
- tx_oob.short_oob.suppress_tx_CQE_generation = 0;
tx_oob.short_oob.VCQ_number = txq->gdma_cq.id;
tx_oob.short_oob.VSQ_frame_num =
@@ -362,6 +376,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (seg_idx != m_pkt->nb_segs)
continue;
+ /* If the queue has room to post at least two more WQEs and
+ * there are at least two packets left to send, use TX CQE
+ * suppression for the current WQE
+ */
+ if (txq->desc_ring_len + 1 < txq->num_desc &&
+ pkt_idx + 1 < nb_pkts)
+ tx_oob.short_oob.suppress_tx_CQE_generation = 1;
+ else
+ tx_oob.short_oob.suppress_tx_CQE_generation = 0;
+
struct gdma_work_request work_req;
uint32_t wqe_size_in_bu;
@@ -384,8 +408,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Update queue for tracking pending requests */
desc->pkt = m_pkt;
desc->wqe_size_in_bu = wqe_size_in_bu;
+ desc->suppress_tx_cqe =
+ tx_oob.short_oob.suppress_tx_CQE_generation;
txq->desc_ring_head =
(txq->desc_ring_head + 1) % txq->num_desc;
+ txq->desc_ring_len++;
pkt_sent++;
--
2.32.0
* [PATCH v2] net/mana: suppress TX CQE generation whenever possible
2023-04-28 3:36 [PATCH] net/mana: suppress TX CQE generation whenever possible longli
@ 2023-05-06 1:32 ` longli
2023-05-19 16:34 ` Ferruh Yigit
` (2 more replies)
0 siblings, 3 replies; 7+ messages in thread
From: longli @ 2023-05-06 1:32 UTC (permalink / raw)
To: Ferruh Yigit, Andrew Rybchenko; +Cc: dev, Ajay Sharma, Long Li, stable
From: Long Li <longli@microsoft.com>
When sending TX packets, we don't need a completion for every packet sent.
If packets are sent in a series, the completion of the last packet can be
used to indicate completion of all prior packets.
Cc: stable@dpdk.org
Signed-off-by: Long Li <longli@microsoft.com>
---
Change log
v2: rebased to dpdk-next-net
drivers/net/mana/mana.h | 3 ++-
drivers/net/mana/tx.c | 33 ++++++++++++++++++++++++++++++---
2 files changed, 32 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index b653e1dd82..be88537f1a 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -353,6 +353,7 @@ struct mana_priv {
struct mana_txq_desc {
struct rte_mbuf *pkt;
uint32_t wqe_size_in_bu;
+ bool suppress_tx_cqe;
};
struct mana_rxq_desc {
@@ -401,7 +402,7 @@ struct mana_txq {
/* desc_ring_head is where we put pending requests to ring,
* completion pull off desc_ring_tail
*/
- uint32_t desc_ring_head, desc_ring_tail;
+ uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;
struct mana_mr_btree mr_btree;
struct mana_stats stats;
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 7f570181ad..5947efbe8d 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -43,9 +43,11 @@ mana_stop_tx_queues(struct rte_eth_dev *dev)
txq->desc_ring_tail =
(txq->desc_ring_tail + 1) % txq->num_desc;
+ txq->desc_ring_len--;
}
txq->desc_ring_head = 0;
txq->desc_ring_tail = 0;
+ txq->desc_ring_len = 0;
memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq));
memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq));
@@ -173,13 +175,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int ret;
void *db_page;
uint16_t pkt_sent = 0;
- uint32_t num_comp;
+ uint32_t num_comp, i;
/* Process send completions from GDMA */
num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
txq->gdma_comp_buf, txq->num_desc);
- for (uint32_t i = 0; i < num_comp; i++) {
+ i = 0;
+ while (i < num_comp) {
struct mana_txq_desc *desc =
&txq->desc_ring[txq->desc_ring_tail];
struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *)
@@ -204,7 +207,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
desc->pkt = NULL;
txq->desc_ring_tail = (txq->desc_ring_tail + 1) % txq->num_desc;
+ txq->desc_ring_len--;
txq->gdma_sq.tail += desc->wqe_size_in_bu;
+
+ /* If TX CQE suppression was used for this WQE, don't consume
+ * another CQE; move on to the next packet
+ */
+ if (desc->suppress_tx_cqe)
+ continue;
+
+ i++;
}
/* Post send requests to GDMA */
@@ -215,6 +227,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct one_sgl sgl;
uint16_t seg_idx;
+ if (txq->desc_ring_len >= txq->num_desc)
+ break;
+
/* Drop the packet if it exceeds max segments */
if (m_pkt->nb_segs > priv->max_send_sge) {
DP_LOG(ERR, "send packet segments %d exceeding max",
@@ -310,7 +325,6 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
tx_oob.short_oob.tx_compute_UDP_checksum = 0;
}
- tx_oob.short_oob.suppress_tx_CQE_generation = 0;
tx_oob.short_oob.VCQ_number = txq->gdma_cq.id;
tx_oob.short_oob.VSQ_frame_num =
@@ -362,6 +376,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (seg_idx != m_pkt->nb_segs)
continue;
+ /* If the queue has room to post at least two more WQEs and
+ * there are at least two packets left to send, use TX CQE
+ * suppression for the current WQE
+ */
+ if (txq->desc_ring_len + 1 < txq->num_desc &&
+ pkt_idx + 1 < nb_pkts)
+ tx_oob.short_oob.suppress_tx_CQE_generation = 1;
+ else
+ tx_oob.short_oob.suppress_tx_CQE_generation = 0;
+
struct gdma_work_request work_req;
uint32_t wqe_size_in_bu;
@@ -384,8 +408,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Update queue for tracking pending requests */
desc->pkt = m_pkt;
desc->wqe_size_in_bu = wqe_size_in_bu;
+ desc->suppress_tx_cqe =
+ tx_oob.short_oob.suppress_tx_CQE_generation;
txq->desc_ring_head =
(txq->desc_ring_head + 1) % txq->num_desc;
+ txq->desc_ring_len++;
pkt_sent++;
--
2.34.1
* Re: [PATCH v2] net/mana: suppress TX CQE generation whenever possible
2023-05-06 1:32 ` [PATCH v2] " longli
@ 2023-05-19 16:34 ` Ferruh Yigit
2023-05-19 16:40 ` Ferruh Yigit
2023-05-26 0:19 ` EXTERNAL] [PATCH ] Add checks for the port capabilities Ajay Sharma
2 siblings, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2023-05-19 16:34 UTC (permalink / raw)
To: longli, Andrew Rybchenko, Luca Boccassi, Kevin Traynor
Cc: dev, Ajay Sharma, Long Li, stable
On 5/6/2023 2:32 AM, longli@linuxonhyperv.com wrote:
> From: Long Li <longli@microsoft.com>
>
> When sending TX packets, we don't need a completion for every packet sent.
> If packets are sent in a series, the completion of the last packet can be
> used to indicate completion of all prior packets.
>
> Cc: stable@dpdk.org
>
Hi Long,
Patch looks good to me, but I am not sure about the backport request.
This patch is an optimisation, and we tend to take only fixes into the
stable trees.
LTS maintainers cc'ed for comment.
> Signed-off-by: Long Li <longli@microsoft.com>
> ---
> Change log
> v2: rebased to dpdk-next-net
>
> drivers/net/mana/mana.h | 3 ++-
> drivers/net/mana/tx.c | 33 ++++++++++++++++++++++++++++++---
> 2 files changed, 32 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
> index b653e1dd82..be88537f1a 100644
> --- a/drivers/net/mana/mana.h
> +++ b/drivers/net/mana/mana.h
> @@ -353,6 +353,7 @@ struct mana_priv {
> struct mana_txq_desc {
> struct rte_mbuf *pkt;
> uint32_t wqe_size_in_bu;
> + bool suppress_tx_cqe;
> };
>
> struct mana_rxq_desc {
> @@ -401,7 +402,7 @@ struct mana_txq {
> /* desc_ring_head is where we put pending requests to ring,
> * completion pull off desc_ring_tail
> */
> - uint32_t desc_ring_head, desc_ring_tail;
> + uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;
>
> struct mana_mr_btree mr_btree;
> struct mana_stats stats;
> diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
> index 7f570181ad..5947efbe8d 100644
> --- a/drivers/net/mana/tx.c
> +++ b/drivers/net/mana/tx.c
> @@ -43,9 +43,11 @@ mana_stop_tx_queues(struct rte_eth_dev *dev)
>
> txq->desc_ring_tail =
> (txq->desc_ring_tail + 1) % txq->num_desc;
> + txq->desc_ring_len--;
> }
> txq->desc_ring_head = 0;
> txq->desc_ring_tail = 0;
> + txq->desc_ring_len = 0;
>
> memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq));
> memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq));
> @@ -173,13 +175,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> int ret;
> void *db_page;
> uint16_t pkt_sent = 0;
> - uint32_t num_comp;
> + uint32_t num_comp, i;
>
> /* Process send completions from GDMA */
> num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
> txq->gdma_comp_buf, txq->num_desc);
>
> - for (uint32_t i = 0; i < num_comp; i++) {
> + i = 0;
> + while (i < num_comp) {
> struct mana_txq_desc *desc =
> &txq->desc_ring[txq->desc_ring_tail];
> struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *)
> @@ -204,7 +207,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>
> desc->pkt = NULL;
> txq->desc_ring_tail = (txq->desc_ring_tail + 1) % txq->num_desc;
> + txq->desc_ring_len--;
> txq->gdma_sq.tail += desc->wqe_size_in_bu;
> +
> + /* If TX CQE suppression was used for this WQE, don't consume
> + * another CQE; move on to the next packet
> + */
> + if (desc->suppress_tx_cqe)
> + continue;
> +
> + i++;
> }
>
> /* Post send requests to GDMA */
> @@ -215,6 +227,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> struct one_sgl sgl;
> uint16_t seg_idx;
>
> + if (txq->desc_ring_len >= txq->num_desc)
> + break;
> +
> /* Drop the packet if it exceeds max segments */
> if (m_pkt->nb_segs > priv->max_send_sge) {
> DP_LOG(ERR, "send packet segments %d exceeding max",
> @@ -310,7 +325,6 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> tx_oob.short_oob.tx_compute_UDP_checksum = 0;
> }
>
> - tx_oob.short_oob.suppress_tx_CQE_generation = 0;
> tx_oob.short_oob.VCQ_number = txq->gdma_cq.id;
>
> tx_oob.short_oob.VSQ_frame_num =
> @@ -362,6 +376,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> if (seg_idx != m_pkt->nb_segs)
> continue;
>
> + /* If the queue has room to post at least two more WQEs and
> + * there are at least two packets left to send, use TX CQE
> + * suppression for the current WQE
> + */
> + if (txq->desc_ring_len + 1 < txq->num_desc &&
> + pkt_idx + 1 < nb_pkts)
> + tx_oob.short_oob.suppress_tx_CQE_generation = 1;
> + else
> + tx_oob.short_oob.suppress_tx_CQE_generation = 0;
> +
> struct gdma_work_request work_req;
> uint32_t wqe_size_in_bu;
>
> @@ -384,8 +408,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> /* Update queue for tracking pending requests */
> desc->pkt = m_pkt;
> desc->wqe_size_in_bu = wqe_size_in_bu;
> + desc->suppress_tx_cqe =
> + tx_oob.short_oob.suppress_tx_CQE_generation;
> txq->desc_ring_head =
> (txq->desc_ring_head + 1) % txq->num_desc;
> + txq->desc_ring_len++;
>
> pkt_sent++;
>
* Re: [PATCH v2] net/mana: suppress TX CQE generation whenever possible
2023-05-06 1:32 ` [PATCH v2] " longli
2023-05-19 16:34 ` Ferruh Yigit
@ 2023-05-19 16:40 ` Ferruh Yigit
2023-05-26 0:19 ` EXTERNAL] [PATCH ] Add checks for the port capabilities Ajay Sharma
2 siblings, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2023-05-19 16:40 UTC (permalink / raw)
To: longli, Andrew Rybchenko; +Cc: dev, Ajay Sharma, Long Li, stable
On 5/6/2023 2:32 AM, longli@linuxonhyperv.com wrote:
> From: Long Li <longli@microsoft.com>
>
> When sending TX packets, we don't need a completion for every packet sent.
> If packets are sent in a series, the completion of the last packet can be
> used to indicate completion of all prior packets.
>
> Signed-off-by: Long Li <longli@microsoft.com>
>
Applied to dpdk-next-net/main, thanks.
Dropped the stable tag in next-net; it can be added back based on the
discussion with the LTS maintainers.
* EXTERNAL] [PATCH ] Add checks for the port capabilities
2023-05-06 1:32 ` [PATCH v2] " longli
2023-05-19 16:34 ` Ferruh Yigit
2023-05-19 16:40 ` Ferruh Yigit
@ 2023-05-26 0:19 ` Ajay Sharma
2023-05-26 3:30 ` Stephen Hemminger
2 siblings, 1 reply; 7+ messages in thread
From: Ajay Sharma @ 2023-05-26 0:19 UTC (permalink / raw)
To: Ferruh Yigit, Andrew Rybchenko; +Cc: dev, Long Li, stable, Ajay Sharma
From: Ajay Sharma <sharmaajay@microsoft.com>
Date: Mon, 22 May 2023 20:24:39 -0700
Subject: [PATCH] Add checks for the port capabilities
Add checks on the values retrieved from the firmware to prevent
overflow.
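As an illustration of the truncation these checks guard against, here is a minimal standalone sketch (the uint16_t field width is an assumption based on the dev_info limits being filled in, and CLAMP_U16 only mirrors what RTE_MIN(value, UINT16_MAX) does in the patch):

#include <stdint.h>
#include <stdio.h>

/* Saturate instead of letting the assignment silently truncate. */
#define CLAMP_U16(x) ((x) < UINT16_MAX ? (uint16_t)(x) : (uint16_t)UINT16_MAX)

int main(void)
{
    uint32_t fw_max_rx_queues = 0x10000;  /* hypothetical firmware value */

    uint16_t plain = (uint16_t)fw_max_rx_queues;     /* wraps to 0 */
    uint16_t clamped = CLAMP_U16(fw_max_rx_queues);  /* caps at 65535 */

    printf("plain: %u, clamped: %u\n", plain, clamped);
    return 0;
}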
Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
---
drivers/net/mana/mana.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 7630118d4f..3a7e302c86 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -292,8 +292,9 @@ mana_dev_info_get(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = MAX_FRAME_SIZE;
- dev_info->max_rx_queues = priv->max_rx_queues;
- dev_info->max_tx_queues = priv->max_tx_queues;
+ dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, USHRT_MAX);
+ dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, USHRT_MAX);
+
dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR;
dev_info->max_hash_mac_addrs = 0;
@@ -334,16 +335,17 @@ mana_dev_info_get(struct rte_eth_dev *dev,
/* Buffer limits */
dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
- dev_info->rx_desc_lim.nb_max = priv->max_rx_desc;
+ dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, USHRT_MAX);
dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
- dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge;
- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge;
+ dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(priv->max_recv_sge, USHRT_MAX);
+ dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(priv->max_recv_sge, USHRT_MAX);
+
dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
- dev_info->tx_desc_lim.nb_max = priv->max_tx_desc;
+ dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, USHRT_MAX);
dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
- dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge;
- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge;
+ dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(priv->max_send_sge, USHRT_MAX);
+ dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(priv->max_recv_sge, USHRT_MAX);
/* Speed */
dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
--
2.25.1
* Re: EXTERNAL] [PATCH ] Add checks for the port capabilities
2023-05-26 0:19 ` EXTERNAL] [PATCH ] Add checks for the port capabilities Ajay Sharma
@ 2023-05-26 3:30 ` Stephen Hemminger
2023-05-29 1:41 ` [EXTERNAL] " Ajay Sharma
0 siblings, 1 reply; 7+ messages in thread
From: Stephen Hemminger @ 2023-05-26 3:30 UTC (permalink / raw)
To: Ajay Sharma; +Cc: Ferruh Yigit, Andrew Rybchenko, dev, Long Li, stable
On Fri, 26 May 2023 00:19:59 +0000
Ajay Sharma <sharmaajay@microsoft.com> wrote:
> + dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, USHRT_MAX);
> + dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, USHRT_MAX);
> +
Please use UINT16_MAX instead of USHRT_MAX, since uint16_t is the type of
max_rx_queues.
Both are the same size, but it is best to be consistent.
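A quick standalone check of that point, assuming a platform where unsigned short is 16 bits (which is where the two constants coincide):

#include <limits.h>
#include <stdint.h>

/* Same value on such platforms, but UINT16_MAX matches the uint16_t
 * dev_info limits being clamped, so it reads as the intended bound.
 */
_Static_assert(USHRT_MAX == UINT16_MAX, "both are 65535 here");

int main(void) { return 0; }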
* RE: [EXTERNAL] Re: EXTERNAL] [PATCH ] Add checks for the port capabilities
2023-05-26 3:30 ` Stephen Hemminger
@ 2023-05-29 1:41 ` Ajay Sharma
0 siblings, 0 replies; 7+ messages in thread
From: Ajay Sharma @ 2023-05-29 1:41 UTC (permalink / raw)
To: Stephen Hemminger
Cc: Ferruh Yigit, Andrew Rybchenko, dev, Long Li, stable, Ajay Sharma
From 1290db88b8748085c9f09a58b336b8c757442b87 Mon Sep 17 00:00:00 2001
From: Ajay Sharma <sharmaajay@microsoft.com>
Date: Sun, 28 May 2023 18:31:59 -0700
Subject: [PATCH] Change USHRT_MAX to UINT16_MAX
---
drivers/net/mana/mana.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 3a7e302c86..a39d6798bf 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -292,8 +292,8 @@ mana_dev_info_get(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = MAX_FRAME_SIZE;
- dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, USHRT_MAX);
- dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, USHRT_MAX);
+ dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX);
+ dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX);
dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR;
@@ -335,17 +335,17 @@ mana_dev_info_get(struct rte_eth_dev *dev,
/* Buffer limits */
dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
- dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, USHRT_MAX);
+ dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX);
dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
- dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(priv->max_recv_sge, USHRT_MAX);
- dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(priv->max_recv_sge, USHRT_MAX);
+ dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(priv->max_recv_sge, UINT16_MAX);
+ dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(priv->max_recv_sge, UINT16_MAX);
dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
- dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, USHRT_MAX);
+ dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX);
dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
- dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(priv->max_send_sge, USHRT_MAX);
- dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(priv->max_recv_sge, USHRT_MAX);
+ dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(priv->max_send_sge, UINT16_MAX);
+ dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(priv->max_recv_sge, UINT16_MAX);
/* Speed */
dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
--
2.25.1
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Thursday, May 25, 2023 8:31 PM
> To: Ajay Sharma <sharmaajay@microsoft.com>
> Cc: Ferruh Yigit <ferruh.yigit@amd.com>; Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru>; dev@dpdk.org; Long Li
> <longli@microsoft.com>; stable@dpdk.org
> Subject: [EXTERNAL] Re: EXTERNAL] [PATCH ] Add checks for the port
> capabilities
>
> On Fri, 26 May 2023 00:19:59 +0000
> Ajay Sharma <sharmaajay@microsoft.com> wrote:
>
> > + dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues,
> USHRT_MAX);
> > + dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues,
> USHRT_MAX);
> > +
>
> Please use UINT16_MAX instead of USHRT_MAX, since uint16_t is the type
> of max_rx_queues.
> Both are the same size, but it is best to be consistent.