* [dpdk-dev] [PATCH v3] net/axgbe: add support for Scattered Rx
@ 2020-03-05 5:25 ssebasti
2020-03-05 9:06 ` Ferruh Yigit
2020-03-05 10:26 ` Kumar, Ravi1
0 siblings, 2 replies; 4+ messages in thread
From: ssebasti @ 2020-03-05 5:25 UTC
To: dev; +Cc: Ravi1.Kumar, ferruh.yigit
From: Selwin Sebastian <selwin.sebastian@amd.com>
Enable scattered rx support and add jumbo packet receive capability
Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
---
doc/guides/nics/features/axgbe.ini | 1 +
drivers/net/axgbe/axgbe_common.h | 2 +
drivers/net/axgbe/axgbe_ethdev.c | 16 +++-
drivers/net/axgbe/axgbe_rxtx.c | 145 +++++++++++++++++++++++++++++
drivers/net/axgbe/axgbe_rxtx.h | 2 +
5 files changed, 165 insertions(+), 1 deletion(-)
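
Usage note (not part of the patch): the Rx burst handler is now chosen at
start time, so an application only needs to request a large frame size or
the scatter offload before starting the port. Below is a minimal,
hypothetical configuration sketch; the port id, descriptor counts,
mempool and the configure_scattered_rx() name are placeholders, and the
API names are the same pre-20.11 ethdev ones this patch uses.

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
configure_scattered_rx(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* Either condition makes axgbe_dev_start() set scattered_rx and
	 * switch rx_pkt_burst to eth_axgbe_recv_scattered_pkts(): a frame
	 * limit above the Rx buffer size, or an explicit scatter request.
	 */
	conf.rxmode.max_rx_pkt_len = 9000;
	conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
			       DEV_RX_OFFLOAD_SCATTER;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	/* The burst handler is selected in dev_start, so start last. */
	return rte_eth_dev_start(port_id);
}
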
diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index ab4da559f..0becaa097 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Jumbo frame = Y
+Scattered Rx = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index fdb037dd5..fbd46150c 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -1135,6 +1135,8 @@
#define RX_NORMAL_DESC3_PL_WIDTH 14
#define RX_NORMAL_DESC3_RSV_INDEX 26
#define RX_NORMAL_DESC3_RSV_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index d0b6f091f..013c6330d 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -249,6 +249,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
{
struct axgbe_port *pdata = dev->data->dev_private;
int ret;
+ struct rte_eth_dev_data *dev_data = dev->data;
+ uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
PMD_INIT_FUNC_TRACE();
@@ -279,6 +281,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ max_pkt_len > pdata->rx_buf_size)
+ dev_data->scattered_rx = 1;
+
+ /* Scatter Rx handling */
+ if (dev_data->scattered_rx)
+ dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
+ else
+ dev->rx_pkt_burst = &axgbe_recv_pkts;
+
return 0;
}
@@ -789,6 +802,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
@@ -1020,7 +1035,6 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
int ret;
eth_dev->dev_ops = &axgbe_eth_dev_ops;
- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
/*
* For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 96055c25b..8f818eb89 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -307,6 +307,151 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *first_seg = NULL;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ bool eop = 0;
+next_desc:
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for any errors and free mbuf*/
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR)
+ && (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+
+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, LD)) {
+ eop = 0;
+ pkt_len = rxq->buf_size;
+ data_len = pkt_len;
+ } else {
+ eop = 1;
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, PL);
+ data_len = pkt_len - rxq->crc_len;
+ }
+
+ if (first_seg != NULL) {
+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
+ rte_mempool_put(rxq->mb_pool,
+ first_seg);
+ } else {
+ first_seg = mbuf;
+ }
+
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+
+ /* Mbuf populate */
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->data_len = data_len;
+
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+
+ if (!eop) {
+ rte_pktmbuf_free(mbuf);
+ goto next_desc;
+ }
+
+ first_seg->pkt_len = pkt_len;
+ rxq->bytes += pkt_len;
+ mbuf->next = NULL;
+
+ first_seg->port = rxq->port_id;
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (unlikely(error_status
+ == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ rx_pkts[nb_rx++] = first_seg;
+
+ /* Setup receipt context for a new packet.*/
+ first_seg = NULL;
+ }
+
+ /* Save receive context.*/
+ rxq->pkts += nb_rx;
+
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+ return nb_rx;
+}
+
/* Tx Apis */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index a21537df9..f6796b09b 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
--
2.17.1
* Re: [dpdk-dev] [PATCH v3] net/axgbe: add support for Scattered Rx
2020-03-05 5:25 [dpdk-dev] [PATCH v3] net/axgbe: add support for Scattered Rx ssebasti
@ 2020-03-05 9:06 ` Ferruh Yigit
2020-03-05 10:26 ` Kumar, Ravi1
1 sibling, 0 replies; 4+ messages in thread
From: Ferruh Yigit @ 2020-03-05 9:06 UTC
To: ssebasti, dev; +Cc: Ravi1.Kumar
On 3/5/2020 5:25 AM, ssebasti@amd.com wrote:
> From: Selwin Sebastian <selwin.sebastian@amd.com>
>
> Enable scattered rx support and add jumbo packet receive capability
>
> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
Hi Selwin,
Looks good to me. I will wait for Ravi's ack/review before merging.
Thanks,
ferruh
* Re: [dpdk-dev] [PATCH v3] net/axgbe: add support for Scattered Rx
2020-03-05 5:25 [dpdk-dev] [PATCH v3] net/axgbe: add support for Scattered Rx ssebasti
2020-03-05 9:06 ` Ferruh Yigit
@ 2020-03-05 10:26 ` Kumar, Ravi1
2020-03-05 13:26 ` Ferruh Yigit
1 sibling, 1 reply; 4+ messages in thread
From: Kumar, Ravi1 @ 2020-03-05 10:26 UTC
To: Sebastian, Selwin, dev; +Cc: ferruh.yigit
Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>From: Selwin Sebastian <selwin.sebastian@amd.com>
>
>Enable scattered rx support and add jumbo packet receive capability
>
>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
<...>
* Re: [dpdk-dev] [PATCH v3] net/axgbe: add support for Scattered Rx
2020-03-05 10:26 ` Kumar, Ravi1
@ 2020-03-05 13:26 ` Ferruh Yigit
0 siblings, 0 replies; 4+ messages in thread
From: Ferruh Yigit @ 2020-03-05 13:26 UTC
To: Kumar, Ravi1, Sebastian, Selwin, dev
On 3/5/2020 10:26 AM, Kumar, Ravi1 wrote:
<...>
>> From: Selwin Sebastian <selwin.sebastian@amd.com>
>>
>> Enable scattered rx support and add jumbo packet receive capability
>>
>> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>
> Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>
Applied to dpdk-next-net/master, thanks.