* [dpdk-dev] [PATCH 1/4] net/fm10k: cleanup Tx buffers
2019-09-26 9:29 [dpdk-dev] [PATCH 0/4] drivers/net: cleanup Tx buffers Di ChenxuX
@ 2019-09-26 9:29 ` Di ChenxuX
2019-09-26 10:37 ` Gavin Hu (Arm Technology China)
2019-09-26 9:29 ` [dpdk-dev] [PATCH 2/4] net/i40e: " Di ChenxuX
` (2 subsequent siblings)
3 siblings, 1 reply; 8+ messages in thread
From: Di ChenxuX @ 2019-09-26 9:29 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, Di ChenxuX
Add support to the fm10k driver for the API rte_eth_tx_done_cleanup
to force free consumed buffers on Tx ring.
Signed-off-by: Di ChenxuX <chenxux.di@intel.com>
---
drivers/net/fm10k/fm10k.h | 2 ++
drivers/net/fm10k/fm10k_ethdev.c | 1 +
drivers/net/fm10k/fm10k_rxtx.c | 45 ++++++++++++++++++++++++++++++++
3 files changed, 48 insertions(+)
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 916b856ac..ddb1d64ec 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -342,6 +342,8 @@ uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int fm10k_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index db4d72129..328468185 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -2838,6 +2838,7 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.reta_query = fm10k_reta_query,
.rss_hash_update = fm10k_rss_hash_update,
.rss_hash_conf_get = fm10k_rss_hash_conf_get,
+ .tx_done_cleanup = fm10k_tx_done_cleanup,
};
static int ftag_check_handler(__rte_unused const char *key,
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 5c3112183..f67c5bf00 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -541,6 +541,51 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
}
}
+int fm10k_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct fm10k_tx_queue *q = (struct fm10k_tx_queue *)txq;
+ uint16_t next_rs, count = 0;
+
+ if (q == NULL)
+ return -ENODEV;
+
+ next_rs = fifo_peek(&q->rs_tracker);
+ if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
+ return count;
+
+ /* the DONE flag is set on this descriptor so remove the ID
+ * from the RS bit tracker and free the buffers
+ */
+ fifo_remove(&q->rs_tracker);
+
+ /* wrap around? if so, free buffers from last_free up to but NOT
+ * including nb_desc
+ */
+ if (q->last_free > next_rs) {
+ count = q->nb_desc - q->last_free;
+ tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+ q->last_free = 0;
+
+ if (unlikely(count == (int)free_cnt))
+ return count;
+ }
+
+ /* adjust free descriptor count before the next loop */
+ q->nb_free += count + (next_rs + 1 - q->last_free);
+
+ /* free buffers from last_free, up to and including next_rs */
+ if (q->last_free <= next_rs) {
+ count = next_rs - q->last_free + 1;
+ tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+ q->last_free += count;
+ }
+
+ if (q->last_free == q->nb_desc)
+ q->last_free = 0;
+
+ return count;
+}
+
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
uint16_t next_rs, count = 0;
--
2.17.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [dpdk-dev] [PATCH 1/4] net/fm10k: cleanup Tx buffers
2019-09-26 9:29 ` [dpdk-dev] [PATCH 1/4] net/fm10k: " Di ChenxuX
@ 2019-09-26 10:37 ` Gavin Hu (Arm Technology China)
0 siblings, 0 replies; 8+ messages in thread
From: Gavin Hu (Arm Technology China) @ 2019-09-26 10:37 UTC (permalink / raw)
To: Di ChenxuX, dev; +Cc: qiming.yang, nd
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Di ChenxuX
> Sent: Thursday, September 26, 2019 5:30 PM
> To: dev@dpdk.org
> Cc: qiming.yang@intel.com; Di ChenxuX <chenxux.di@intel.com>
> Subject: [dpdk-dev] [PATCH 1/4] net/fm10k: cleanup Tx buffers
>
> Add support to the fm10k driver for the API rte_eth_tx_done_cleanup
> to force free consumed buffers on Tx ring.
>
> Signed-off-by: Di ChenxuX <chenxux.di@intel.com>
> ---
> drivers/net/fm10k/fm10k.h | 2 ++
> drivers/net/fm10k/fm10k_ethdev.c | 1 +
> drivers/net/fm10k/fm10k_rxtx.c | 45
> ++++++++++++++++++++++++++++++++
> 3 files changed, 48 insertions(+)
>
> diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
> index 916b856ac..ddb1d64ec 100644
> --- a/drivers/net/fm10k/fm10k.h
> +++ b/drivers/net/fm10k/fm10k.h
> @@ -342,6 +342,8 @@ uint16_t fm10k_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts);
>
> +int fm10k_tx_done_cleanup(void *txq, uint32_t free_cnt);
> +
> int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
> int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
> void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
> diff --git a/drivers/net/fm10k/fm10k_ethdev.c
> b/drivers/net/fm10k/fm10k_ethdev.c
> index db4d72129..328468185 100644
> --- a/drivers/net/fm10k/fm10k_ethdev.c
> +++ b/drivers/net/fm10k/fm10k_ethdev.c
> @@ -2838,6 +2838,7 @@ static const struct eth_dev_ops
> fm10k_eth_dev_ops = {
> .reta_query = fm10k_reta_query,
> .rss_hash_update = fm10k_rss_hash_update,
> .rss_hash_conf_get = fm10k_rss_hash_conf_get,
> + .tx_done_cleanup = fm10k_tx_done_cleanup,
> };
>
> static int ftag_check_handler(__rte_unused const char *key,
> diff --git a/drivers/net/fm10k/fm10k_rxtx.c
> b/drivers/net/fm10k/fm10k_rxtx.c
> index 5c3112183..f67c5bf00 100644
> --- a/drivers/net/fm10k/fm10k_rxtx.c
> +++ b/drivers/net/fm10k/fm10k_rxtx.c
> @@ -541,6 +541,51 @@ static inline void tx_free_bulk_mbuf(struct
> rte_mbuf **txep, int num)
> }
> }
>
> +int fm10k_tx_done_cleanup(void *txq, uint32_t free_cnt)
> +{
> + struct fm10k_tx_queue *q = (struct fm10k_tx_queue *)txq;
> + uint16_t next_rs, count = 0;
"count" should be declared as uint32_t to compare against free_cnt.
/Gavin
> +
> + if (q == NULL)
> + return -ENODEV;
> +
> + next_rs = fifo_peek(&q->rs_tracker);
> + if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
> + return count;
> +
> + /* the DONE flag is set on this descriptor so remove the ID
> + * from the RS bit tracker and free the buffers
> + */
> + fifo_remove(&q->rs_tracker);
> +
> + /* wrap around? if so, free buffers from last_free up to but NOT
> + * including nb_desc
> + */
> + if (q->last_free > next_rs) {
> + count = q->nb_desc - q->last_free;
> + tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
> + q->last_free = 0;
> +
> + if (unlikely(count == (int)free_cnt))
> + return count;
> + }
> +
> + /* adjust free descriptor count before the next loop */
> + q->nb_free += count + (next_rs + 1 - q->last_free);
> +
> + /* free buffers from last_free, up to and including next_rs */
> + if (q->last_free <= next_rs) {
> + count = next_rs - q->last_free + 1;
> + tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
> + q->last_free += count;
> + }
> +
> + if (q->last_free == q->nb_desc)
> + q->last_free = 0;
> +
> + return count;
> +}
> +
> static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
> {
> uint16_t next_rs, count = 0;
> --
> 2.17.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH 2/4] net/i40e: cleanup Tx buffers
2019-09-26 9:29 [dpdk-dev] [PATCH 0/4] drivers/net: cleanup Tx buffers Di ChenxuX
2019-09-26 9:29 ` [dpdk-dev] [PATCH 1/4] net/fm10k: " Di ChenxuX
@ 2019-09-26 9:29 ` Di ChenxuX
2019-09-26 10:36 ` Gavin Hu (Arm Technology China)
2019-09-26 9:29 ` [dpdk-dev] [PATCH 3/4] net/ice: " Di ChenxuX
2019-09-26 9:29 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: " Di ChenxuX
3 siblings, 1 reply; 8+ messages in thread
From: Di ChenxuX @ 2019-09-26 9:29 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, Di ChenxuX
Add support to the i40e driver for the API rte_eth_tx_done_cleanup
to force free consumed buffers on Tx ring.
Signed-off-by: Di ChenxuX <chenxux.di@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev_vf.c | 1 +
drivers/net/i40e/i40e_rxtx.c | 42 +++++++++++++++++++++++++++++++
drivers/net/i40e/i40e_rxtx.h | 1 +
4 files changed, 45 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4e40b7ab5..cf35fb5da 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -509,6 +509,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
.tm_ops_get = i40e_tm_ops_get,
+ .tx_done_cleanup = i40e_tx_done_cleanup,
};
/* store statistics names and its offset in stats structure */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index c77b30c54..b462a9d8c 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
.mtu_set = i40evf_dev_mtu_set,
.mac_addr_set = i40evf_set_default_mac_addr,
+ .tx_done_cleanup = i40e_tx_done_cleanup,
};
/*
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 692c3bab4..29793a1b7 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1416,6 +1416,8 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
break;
}
+ //eth_i40e_tx_done_cleanup(tx_queue,5);
+
return nb_tx;
}
@@ -2467,6 +2469,46 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
}
}
+int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+ struct i40e_tx_entry *sw_ring;
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_cleaned;
+
+ int count = 0;
+
+ if (q == NULL)
+ return -ENODEV;
+
+ sw_ring = q->sw_ring;
+ tx_cleaned = q->last_desc_cleaned;
+ tx_id = sw_ring[q->last_desc_cleaned].next_id;
+ if ((q->tx_ring[tx_id].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ do {
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segment. */
+ tx_cleaned = tx_id;
+ tx_id = sw_ring[tx_id].next_id;
+ count++;
+ } while (count != (int)free_cnt);
+
+ q->nb_tx_free += (uint16_t)count;
+ q->last_desc_cleaned = tx_cleaned;
+
+ return count;
+}
+
void
i40e_reset_tx_queue(struct i40e_tx_queue *txq)
{
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 3fc619af9..1a70eda2c 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -204,6 +204,7 @@ void i40e_dev_free_queues(struct rte_eth_dev *dev);
void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
--
2.17.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [dpdk-dev] [PATCH 2/4] net/i40e: cleanup Tx buffers
2019-09-26 9:29 ` [dpdk-dev] [PATCH 2/4] net/i40e: " Di ChenxuX
@ 2019-09-26 10:36 ` Gavin Hu (Arm Technology China)
0 siblings, 0 replies; 8+ messages in thread
From: Gavin Hu (Arm Technology China) @ 2019-09-26 10:36 UTC (permalink / raw)
To: Di ChenxuX, dev; +Cc: qiming.yang, nd
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Di ChenxuX
> Sent: Thursday, September 26, 2019 5:30 PM
> To: dev@dpdk.org
> Cc: qiming.yang@intel.com; Di ChenxuX <chenxux.di@intel.com>
> Subject: [dpdk-dev] [PATCH 2/4] net/i40e: cleanup Tx buffers
>
> Add support to the i40e driver for the API rte_eth_tx_done_cleanup
> to force free consumed buffers on Tx ring.
>
> Signed-off-by: Di ChenxuX <chenxux.di@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 1 +
> drivers/net/i40e/i40e_ethdev_vf.c | 1 +
> drivers/net/i40e/i40e_rxtx.c | 42 +++++++++++++++++++++++++++++++
> drivers/net/i40e/i40e_rxtx.h | 1 +
> 4 files changed, 45 insertions(+)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 4e40b7ab5..cf35fb5da 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -509,6 +509,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops =
> {
> .mac_addr_set = i40e_set_default_mac_addr,
> .mtu_set = i40e_dev_mtu_set,
> .tm_ops_get = i40e_tm_ops_get,
> + .tx_done_cleanup = i40e_tx_done_cleanup,
> };
>
> /* store statistics names and its offset in stats structure */
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c
> b/drivers/net/i40e/i40e_ethdev_vf.c
> index c77b30c54..b462a9d8c 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
> @@ -215,6 +215,7 @@ static const struct eth_dev_ops
> i40evf_eth_dev_ops = {
> .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
> .mtu_set = i40evf_dev_mtu_set,
> .mac_addr_set = i40evf_set_default_mac_addr,
> + .tx_done_cleanup = i40e_tx_done_cleanup,
> };
>
> /*
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
> index 692c3bab4..29793a1b7 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -1416,6 +1416,8 @@ i40e_xmit_pkts_vec(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> break;
> }
>
> + //eth_i40e_tx_done_cleanup(tx_queue,5);
> +
> return nb_tx;
> }
>
> @@ -2467,6 +2469,46 @@ i40e_tx_queue_release_mbufs(struct
> i40e_tx_queue *txq)
> }
> }
>
> +int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
> +{
> + struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
> + struct i40e_tx_entry *sw_ring;
> + uint16_t tx_id; /* Current segment being processed. */
> + uint16_t tx_cleaned;
> +
> + int count = 0;
uint32_t is better, then you will not get trouble into a negative value.
> +
> + if (q == NULL)
> + return -ENODEV;
> +
> + sw_ring = q->sw_ring;
> + tx_cleaned = q->last_desc_cleaned;
> + tx_id = sw_ring[q->last_desc_cleaned].next_id;
> + if ((q->tx_ring[tx_id].cmd_type_offset_bsz &
> +
> rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> +
> rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> + return 0;
> +
> + do {
> + if (sw_ring[tx_id].mbuf == NULL)
> + break;
> +
> + rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
> + sw_ring[tx_id].mbuf = NULL;
> + sw_ring[tx_id].last_id = tx_id;
> +
> + /* Move to next segment. */
> + tx_cleaned = tx_id;
> + tx_id = sw_ring[tx_id].next_id;
> + count++;
> + } while (count != (int)free_cnt);
This forced conversion is not needed if "count" is declared as uint32_t above.
/Gavin
> +
> + q->nb_tx_free += (uint16_t)count;
> + q->last_desc_cleaned = tx_cleaned;
> +
> + return count;
> +}
> +
> void
> i40e_reset_tx_queue(struct i40e_tx_queue *txq)
> {
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
> index 3fc619af9..1a70eda2c 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -204,6 +204,7 @@ void i40e_dev_free_queues(struct rte_eth_dev
> *dev);
> void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
> void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
> void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
> +int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
> int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
> void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
>
> --
> 2.17.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH 3/4] net/ice: cleanup Tx buffers
2019-09-26 9:29 [dpdk-dev] [PATCH 0/4] drivers/net: cleanup Tx buffers Di ChenxuX
2019-09-26 9:29 ` [dpdk-dev] [PATCH 1/4] net/fm10k: " Di ChenxuX
2019-09-26 9:29 ` [dpdk-dev] [PATCH 2/4] net/i40e: " Di ChenxuX
@ 2019-09-26 9:29 ` Di ChenxuX
2019-09-26 9:29 ` [dpdk-dev] [PATCH 4/4] net/ixgbe: " Di ChenxuX
3 siblings, 0 replies; 8+ messages in thread
From: Di ChenxuX @ 2019-09-26 9:29 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, Di ChenxuX
Add support to the ice driver for the API rte_eth_tx_done_cleanup
to force free consumed buffers on Tx ring.
Signed-off-by: Di ChenxuX <chenxux.di@intel.com>
---
drivers/net/ice/ice_ethdev.c | 1 +
drivers/net/ice/ice_rxtx.c | 41 ++++++++++++++++++++++++++++++++++++
drivers/net/ice/ice_rxtx.h | 1 +
3 files changed, 43 insertions(+)
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 63997fdfb..617f7b2ac 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -160,6 +160,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
.filter_ctrl = ice_dev_filter_ctrl,
.udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
+ .tx_done_cleanup = ice_tx_done_cleanup,
};
/* store statistics names and its offset in stats structure */
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 81af81441..f991cb6c0 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -579,6 +579,47 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return 0;
}
+
+int ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+ struct ice_tx_entry *sw_ring;
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_cleaned;
+
+ int count = 0;
+
+ if (q == NULL)
+ return -ENODEV;
+
+ sw_ring = q->sw_ring;
+ tx_cleaned = q->last_desc_cleaned;
+ tx_id = sw_ring[q->last_desc_cleaned].next_id;
+ if ((q->tx_ring[tx_id].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ do {
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segment. */
+ tx_cleaned = tx_id;
+ tx_id = sw_ring[tx_id].next_id;
+ count++;
+ } while (count != (int)free_cnt);
+
+ q->nb_tx_free += (uint16_t)count;
+ q->last_desc_cleaned = tx_cleaned;
+
+ return count;
+}
+
int
ice_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e9214110c..1ac3f3f91 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -170,6 +170,7 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
--
2.17.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH 4/4] net/ixgbe: cleanup Tx buffers
2019-09-26 9:29 [dpdk-dev] [PATCH 0/4] drivers/net: cleanup Tx buffers Di ChenxuX
` (2 preceding siblings ...)
2019-09-26 9:29 ` [dpdk-dev] [PATCH 3/4] net/ice: " Di ChenxuX
@ 2019-09-26 9:29 ` Di ChenxuX
3 siblings, 0 replies; 8+ messages in thread
From: Di ChenxuX @ 2019-09-26 9:29 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, Di ChenxuX
Add support to the ixgbe driver for the API rte_eth_tx_done_cleanup
to force free consumed buffers on Tx ring.
Signed-off-by: Di ChenxuX <chenxux.di@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 2 ++
drivers/net/ixgbe/ixgbe_rxtx.c | 39 ++++++++++++++++++++++++++++++++
drivers/net/ixgbe/ixgbe_rxtx.h | 2 ++
3 files changed, 43 insertions(+)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7eb3d0567..255af2290 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -591,6 +591,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
.tm_ops_get = ixgbe_tm_ops_get,
+ .tx_done_cleanup = ixgbe_tx_done_cleanup,
};
/*
@@ -639,6 +640,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.reta_query = ixgbe_dev_rss_reta_query,
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
+ .tx_done_cleanup = ixgbe_tx_done_cleanup,
};
/* store statistics names and its offset in stats structure */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index edcfa60ce..7bdb244b0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2306,6 +2306,45 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
}
}
+int ixgbe_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct ixgbe_tx_queue *q = (struct ixgbe_tx_queue *)txq;
+ struct ixgbe_tx_entry *sw_ring;
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_cleaned;
+
+ int count = 0;
+
+ if (q == NULL)
+ return -ENODEV;
+
+ sw_ring = q->sw_ring;
+ tx_cleaned = q->last_desc_cleaned;
+ tx_id = sw_ring[q->last_desc_cleaned].next_id;
+ if (!(q->tx_ring[tx_id].wb.status &
+ IXGBE_TXD_STAT_DD))
+ return 0;
+
+ do {
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segment. */
+ tx_cleaned = tx_id;
+ tx_id = sw_ring[tx_id].next_id;
+ count++;
+ } while (count != (int)free_cnt);
+
+ q->nb_tx_free += (uint16_t)count;
+ q->last_desc_cleaned = tx_cleaned;
+
+ return count;
+}
+
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 505d344b9..2c3770af6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -285,6 +285,8 @@ int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
+int ixgbe_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
--
2.17.1
^ permalink raw reply [flat|nested] 8+ messages in thread