* [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
@ 2020-02-27 6:33 ssebasti
2020-02-27 14:28 ` Ferruh Yigit
2020-02-28 13:09 ` Ferruh Yigit
From: ssebasti @ 2020-02-27 6:33 UTC
To: dev
From: Selwin Sebastian <selwin.sebastian@amd.com>
Enable scattered rx support and add jumbo packet transmit capability
Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
---
doc/guides/nics/features/axgbe.ini | 1 +
drivers/net/axgbe/axgbe_common.h | 2 +
drivers/net/axgbe/axgbe_ethdev.c | 18 +++-
drivers/net/axgbe/axgbe_rxtx.c | 146 +++++++++++++++++++++++++++++
drivers/net/axgbe/axgbe_rxtx.h | 2 +
5 files changed, 168 insertions(+), 1 deletion(-)
diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index ab4da559f..0becaa097 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Jumbo frame = Y
+Scattered Rx = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index fdb037dd5..fbd46150c 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -1135,6 +1135,8 @@
#define RX_NORMAL_DESC3_PL_WIDTH 14
#define RX_NORMAL_DESC3_RSV_INDEX 26
#define RX_NORMAL_DESC3_RSV_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index d0b6f091f..eb2f51f89 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_TSO |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
@@ -1018,9 +1024,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
uint32_t reg, mac_lo, mac_hi;
int ret;
+ struct rte_eth_dev_info dev_info = { 0 };
eth_dev->dev_ops = &axgbe_eth_dev_ops;
- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+ eth_dev->dev_ops->dev_infos_get(eth_dev, &dev_info);
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+
+ /* Scatter Rx handling */
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
+ else
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
/*
* For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 96055c25b..57e2bbb34 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -307,6 +307,152 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
+
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *first_seg = NULL;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ bool eop = 0;
+next_desc:
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for any errors and free mbuf*/
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR)
+ && (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+
+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, LD)) {
+ eop = 0;
+ pkt_len = rxq->buf_size;
+ data_len = pkt_len;
+ } else {
+ eop = 1;
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, PL);
+ data_len = pkt_len - rxq->crc_len;
+ }
+
+ if (first_seg != NULL) {
+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
+ rte_mempool_put(rxq->mb_pool,
+ first_seg);
+ } else {
+ first_seg = mbuf;
+ }
+
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+
+ /* Mbuf populate */
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->data_len = data_len;
+
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+
+ if (!eop) {
+ rte_pktmbuf_free(mbuf);
+ goto next_desc;
+ }
+
+ first_seg->pkt_len = pkt_len;
+ rxq->bytes += pkt_len;
+ mbuf->next = NULL;
+
+ first_seg->port = rxq->port_id;
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (unlikely(error_status
+ == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ rx_pkts[nb_rx++] = first_seg;
+
+ /* Setup receipt context for a new packet.*/
+ first_seg = NULL;
+ }
+
+ /* Save receive context.*/
+ rxq->pkts += nb_rx;
+
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+ return nb_rx;
+}
+
/* Tx Apis */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index a21537df9..f6796b09b 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
--
2.17.1
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-27 6:33 [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx ssebasti
@ 2020-02-27 14:28 ` Ferruh Yigit
2020-02-27 14:35 ` Sebastian, Selwin
2020-02-28 13:09 ` Ferruh Yigit
From: Ferruh Yigit @ 2020-02-27 14:28 UTC
To: ssebasti, dev
On 2/27/2020 6:33 AM, ssebasti@amd.com wrote:
> From: Selwin Sebastian <selwin.sebastian@amd.com>
>
> Enable scattered rx support and add jumbo packet transmit capability
>
> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
<...>
> @@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> + DEV_RX_OFFLOAD_JUMBO_FRAME |
> + DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_IPV4_CKSUM |
> DEV_TX_OFFLOAD_UDP_CKSUM |
> + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> + DEV_TX_OFFLOAD_UDP_TSO |
> + DEV_TX_OFFLOAD_SCTP_CKSUM |
> + DEV_TX_OFFLOAD_MULTI_SEGS |
> DEV_TX_OFFLOAD_TCP_CKSUM;
Is the Tx offload capability update related to this change? If it is not, can
you please send these updates as a separate patch, and send a new version of
this patch without this bit?
Thanks,
ferruh
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-27 14:28 ` Ferruh Yigit
@ 2020-02-27 14:35 ` Sebastian, Selwin
2020-02-27 14:38 ` Ferruh Yigit
From: Sebastian, Selwin @ 2020-02-27 14:35 UTC
To: Ferruh Yigit, dev
Hi Ferruh,
For validation of scatter using the testpmd method mentioned in the DPDK docs, we also need these Tx offloads enabled.
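(For reference, the DTS scatter test plan drives testpmd roughly as below; the
exact flags vary by DPDK version, so treat this as an illustration rather than
the exact invocation:

	./testpmd -c 0x6 -n 4 -- -i --mbcache=200 --mbuf-size=2048 \
		--portmask=0x1 --max-pkt-len=9000 --tx-offloads=0x00008000

Here --tx-offloads=0x00008000 requests DEV_TX_OFFLOAD_MULTI_SEGS, which is why
that Tx capability comes into the picture.)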
Thanks and Regards
Selwin Sebastian
-----Original Message-----
From: Ferruh Yigit <ferruh.yigit@intel.com>
Sent: Thursday, February 27, 2020 7:59 PM
To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
On 2/27/2020 6:33 AM, ssebasti@amd.com wrote:
> From: Selwin Sebastian <selwin.sebastian@amd.com>
>
> Enable scattered rx support and add jumbo packet transmit capability
>
> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
<...>
> @@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
> + DEV_RX_OFFLOAD_JUMBO_FRAME |
> + DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_IPV4_CKSUM |
> DEV_TX_OFFLOAD_UDP_CKSUM |
> + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> + DEV_TX_OFFLOAD_UDP_TSO |
> + DEV_TX_OFFLOAD_SCTP_CKSUM |
> + DEV_TX_OFFLOAD_MULTI_SEGS |
> DEV_TX_OFFLOAD_TCP_CKSUM;
Is the Tx offload capability update related to this change? If it is not, can you please send these updates as a separate patch, and send a new version of this patch without this bit?
Thanks,
ferruh
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-27 14:35 ` Sebastian, Selwin
@ 2020-02-27 14:38 ` Ferruh Yigit
2020-02-28 9:00 ` Sebastian, Selwin
From: Ferruh Yigit @ 2020-02-27 14:38 UTC
To: Sebastian, Selwin, dev
On 2/27/2020 2:35 PM, Sebastian, Selwin wrote:
>
> Hi Ferruh,
> For validation of scatter using the testpmd method mentioned in the DPDK docs, we also need these Tx offloads enabled.
[Please don't top post, it makes conversation hard to follow.]
Can you point to where the testpmd requirement is in the code?
Also, these offloads should be enabled when the HW/driver supports them, not
because testpmd requires them.
>
> Thanks and Regards
> Selwin Sebastian
>
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Thursday, February 27, 2020 7:59 PM
> To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
>
>
> On 2/27/2020 6:33 AM, ssebasti@amd.com wrote:
>> From: Selwin Sebastian <selwin.sebastian@amd.com>
>>
>> Enable scattered rx support and add jumbo packet transmit capability
>>
>> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>
> <...>
>
>> @@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> DEV_RX_OFFLOAD_IPV4_CKSUM |
>> DEV_RX_OFFLOAD_UDP_CKSUM |
>> DEV_RX_OFFLOAD_TCP_CKSUM |
>> + DEV_RX_OFFLOAD_JUMBO_FRAME |
>> + DEV_RX_OFFLOAD_SCATTER |
>> DEV_RX_OFFLOAD_KEEP_CRC;
>>
>> dev_info->tx_offload_capa =
>> DEV_TX_OFFLOAD_IPV4_CKSUM |
>> DEV_TX_OFFLOAD_UDP_CKSUM |
>> + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
>> + DEV_TX_OFFLOAD_UDP_TSO |
>> + DEV_TX_OFFLOAD_SCTP_CKSUM |
>> + DEV_TX_OFFLOAD_MULTI_SEGS |
>> DEV_TX_OFFLOAD_TCP_CKSUM;
>
> Is the Tx offload capability update related to this change? If it is not, can you please send these updates as a separate patch, and send a new version of this patch without this bit?
>
> Thanks,
> ferruh
>
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-27 14:38 ` Ferruh Yigit
@ 2020-02-28 9:00 ` Sebastian, Selwin
2020-02-28 13:08 ` Ferruh Yigit
From: Sebastian, Selwin @ 2020-02-28 9:00 UTC
To: Ferruh Yigit, dev
Thanks and Regards
Selwin Sebastian
-----Original Message-----
From: Ferruh Yigit <ferruh.yigit@intel.com>
Sent: Thursday, February 27, 2020 8:08 PM
To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
On 2/27/2020 2:35 PM, Sebastian, Selwin wrote:
>
> Hi Ferruh,
> For validation of scatter using the testpmd method mentioned in the DPDK docs, we also need these Tx offloads enabled.
[Please don't top post, it makes conversation hard to follow.]
Can you point to where the testpmd requirement is in the code?
https://doc.dpdk.org/dts/test_plans/scatter_test_plan.html talks about DEV_TX_OFFLOAD_MULTI_SEGS as a prerequisite for scatter. When I add DEV_TX_OFFLOAD_MULTI_SEGS as a Tx capability, I was forced to add DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_UDP_TSO and DEV_TX_OFFLOAD_SCTP_CKSUM for the mentioned testpmd run to work.
Also, these offloads should be enabled when the HW/driver supports them, not because testpmd requires them.
I am able to test scatter without adding the DEV_TX_OFFLOAD_MULTI_SEGS Tx capability. Shall I submit version 2 of the patch after removing the DEV_TX_OFFLOAD_MULTI_SEGS, DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_UDP_TSO and DEV_TX_OFFLOAD_SCTP_CKSUM capabilities?
>
> Thanks and Regards
> Selwin Sebastian
>
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Thursday, February 27, 2020 7:59 PM
> To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for
> Scattered Rx
>
>
> On 2/27/2020 6:33 AM, ssebasti@amd.com wrote:
>> From: Selwin Sebastian <selwin.sebastian@amd.com>
>>
>> Enable scattered rx support and add jumbo packet transmit capability
>>
>> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>
> <...>
>
>> @@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> DEV_RX_OFFLOAD_IPV4_CKSUM |
>> DEV_RX_OFFLOAD_UDP_CKSUM |
>> DEV_RX_OFFLOAD_TCP_CKSUM |
>> + DEV_RX_OFFLOAD_JUMBO_FRAME |
>> + DEV_RX_OFFLOAD_SCATTER |
>> DEV_RX_OFFLOAD_KEEP_CRC;
>>
>> dev_info->tx_offload_capa =
>> DEV_TX_OFFLOAD_IPV4_CKSUM |
>> DEV_TX_OFFLOAD_UDP_CKSUM |
>> + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
>> + DEV_TX_OFFLOAD_UDP_TSO |
>> + DEV_TX_OFFLOAD_SCTP_CKSUM |
>> + DEV_TX_OFFLOAD_MULTI_SEGS |
>> DEV_TX_OFFLOAD_TCP_CKSUM;
>
> Is the Tx offload capability update related to this change? If it is not, can you please send these updates as a separate patch, and send a new version of this patch without this bit?
>
> Thanks,
> ferruh
>
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-28 9:00 ` Sebastian, Selwin
@ 2020-02-28 13:08 ` Ferruh Yigit
From: Ferruh Yigit @ 2020-02-28 13:08 UTC
To: Sebastian, Selwin, dev
On 2/28/2020 9:00 AM, Sebastian, Selwin wrote:
>
> Thanks and Regards
> Selwin Sebastian
>
>
>
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Thursday, February 27, 2020 8:08 PM
> To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
>
>
> On 2/27/2020 2:35 PM, Sebastian, Selwin wrote:
>>
>> Hi Ferruh,
>> For validation of scatter using the testpmd method mentioned in the DPDK docs, we also need these Tx offloads enabled.
>
> [Please don't top post, it makes conversation hard to follow.]
>
> Can you point to where the testpmd requirement is in the code?
> https://doc.dpdk.org/dts/test_plans/scatter_test_plan.html talks about DEV_TX_OFFLOAD_MULTI_SEGS as a prerequisite for scatter.
The above document makes a good point:
"The forwarding of scattered input packets naturally enforces the transmission
of scattered packets by PMD transmit functions."
If you only support scattered Rx but not Tx (as in your case), you may get
unexpected results. But this doesn't mean multi-segment Tx should be announced
when it is not supported.
> When I add DEV_TX_OFFLOAD_MULTI_SEGS as a Tx capability, I was forced to add DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_UDP_TSO and DEV_TX_OFFLOAD_SCTP_CKSUM for the mentioned testpmd run to work.
It is still not clear why you are forced to add those capabilities; if you can
point me to the code in testpmd that forces you, I can comment better.
>
> Also, these offloads should be enabled when the HW/driver supports them, not because testpmd requires them.
>
> I am able to test scatter without adding the DEV_TX_OFFLOAD_MULTI_SEGS Tx capability. Shall I submit version 2 of the patch after removing the DEV_TX_OFFLOAD_MULTI_SEGS, DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_UDP_TSO and DEV_TX_OFFLOAD_SCTP_CKSUM capabilities?
First of all, offload capabilities are set based on what offloads the HW/driver
supports. As far as I can see, axgbe doesn't support multi-segment Tx, so you
shouldn't set 'DEV_TX_OFFLOAD_MULTI_SEGS' at all; the same goes for all the
other offload flags.
If that list of offloads is supported by the HW/driver but announcing them as
capabilities was previously forgotten, please send a separate patch for it,
with a proper Fixes line that points to the commit adding the support.
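(For reference, the conventional format is 'Fixes: <12-char commit id>
("commit subject")'. Assuming the commit that added the support is known, a
line of that shape can be generated with something like:

	git log -1 --abbrev=12 --format='Fixes: %h ("%s")' <commit>

where <commit> is a placeholder for that commit's id.)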
>
>>
>> Thanks and Regards
>> Selwin Sebastian
>>
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>> Sent: Thursday, February 27, 2020 7:59 PM
>> To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for
>> Scattered Rx
>>
>>
>> On 2/27/2020 6:33 AM, ssebasti@amd.com wrote:
>>> From: Selwin Sebastian <selwin.sebastian@amd.com>
>>>
>>> Enable scattered rx support and add jumbo packet transmit capability
>>>
>>> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>>
>> <...>
>>
>>> @@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>>> DEV_RX_OFFLOAD_IPV4_CKSUM |
>>> DEV_RX_OFFLOAD_UDP_CKSUM |
>>> DEV_RX_OFFLOAD_TCP_CKSUM |
>>> + DEV_RX_OFFLOAD_JUMBO_FRAME |
>>> + DEV_RX_OFFLOAD_SCATTER |
>>> DEV_RX_OFFLOAD_KEEP_CRC;
>>>
>>> dev_info->tx_offload_capa =
>>> DEV_TX_OFFLOAD_IPV4_CKSUM |
>>> DEV_TX_OFFLOAD_UDP_CKSUM |
>>> + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
>>> + DEV_TX_OFFLOAD_UDP_TSO |
>>> + DEV_TX_OFFLOAD_SCTP_CKSUM |
>>> + DEV_TX_OFFLOAD_MULTI_SEGS |
>>> DEV_TX_OFFLOAD_TCP_CKSUM;
>>
>> Is the Tx offload capability update related to this change? If it is not, can you please send these updates as a separate patch, and send a new version of this patch without this bit?
>>
>> Thanks,
>> ferruh
>>
>
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-27 6:33 [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx ssebasti
2020-02-27 14:28 ` Ferruh Yigit
@ 2020-02-28 13:09 ` Ferruh Yigit
From: Ferruh Yigit @ 2020-02-28 13:09 UTC
To: ssebasti, dev; +Cc: Ravi Kumar
On 2/27/2020 6:33 AM, ssebasti@amd.com wrote:
> From: Selwin Sebastian <selwin.sebastian@amd.com>
>
> Enable scattered rx support and add jumbo packet transmit capability
>
> Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
<...>
> @@ -1018,9 +1024,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
> struct rte_pci_device *pci_dev;
> uint32_t reg, mac_lo, mac_hi;
> int ret;
> + struct rte_eth_dev_info dev_info = { 0 };
>
> eth_dev->dev_ops = &axgbe_eth_dev_ops;
> - eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
> + eth_dev->dev_ops->dev_infos_get(eth_dev, &dev_info);
> +
> + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
> + eth_dev->data->scattered_rx = 1;
This should check the requested config, 'eth_dev->data->dev_conf.rxmode.offloads',
not the capability that the driver set itself (dev_info.rx_offload_capa).
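A minimal sketch of the suggested fix, with the check moved to axgbe_dev_start()
where the application's requested config is known (the revised patches later in
this thread take this shape; 'pdata' is assumed to be the axgbe_port private
data):

	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	/* Scattered Rx is needed if the app requested it, or if the
	 * configured max packet length cannot fit in one Rx buffer. */
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Pick the matching burst function. */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;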
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-03-05 10:20 ` Kumar, Ravi1
@ 2020-03-05 10:27 ` Kumar, Ravi1
From: Kumar, Ravi1 @ 2020-03-05 10:27 UTC
To: Sebastian, Selwin, dev
Please scratch this. I have just acked the v3.
Regards,
Ravi
>
>
>-----Original Message-----
>From: Kumar, Ravi1
>Sent: Thursday, March 5, 2020 3:51 PM
>To: Sebastian, Selwin <Selwin.Sebastian@amd.com>; 'dev@dpdk.org' <dev@dpdk.org>
>Subject: RE: [PATCH v1] net/axgbe: add support for Scattered Rx
>
>Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>
>>From: Selwin Sebastian <selwin.sebastian@amd.com>
>>
>>Enable scattered rx support and add jumbo packet receive capability
>>
>>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>>---
>> doc/guides/nics/features/axgbe.ini | 1 +
>> drivers/net/axgbe/axgbe_common.h | 2 +
>> drivers/net/axgbe/axgbe_ethdev.c | 16 +++-
>> drivers/net/axgbe/axgbe_rxtx.c | 145 +++++++++++++++++++++++++++++
>> drivers/net/axgbe/axgbe_rxtx.h | 2 +
>> 5 files changed, 165 insertions(+), 1 deletion(-)
>>
>>diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
>>index ab4da559f..0becaa097 100644
>>--- a/doc/guides/nics/features/axgbe.ini
>>+++ b/doc/guides/nics/features/axgbe.ini
>>@@ -7,6 +7,7 @@
>> Speed capabilities = Y
>> Link status = Y
>> Jumbo frame = Y
>>+Scattered Rx = Y
>> Promiscuous mode = Y
>> Allmulticast mode = Y
>> RSS hash = Y
>>diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
>>index fdb037dd5..fbd46150c 100644
>>--- a/drivers/net/axgbe/axgbe_common.h
>>+++ b/drivers/net/axgbe/axgbe_common.h
>>@@ -1135,6 +1135,8 @@
>> #define RX_NORMAL_DESC3_PL_WIDTH 14
>> #define RX_NORMAL_DESC3_RSV_INDEX 26
>> #define RX_NORMAL_DESC3_RSV_WIDTH 1
>>+#define RX_NORMAL_DESC3_LD_INDEX 28
>>+#define RX_NORMAL_DESC3_LD_WIDTH 1
>>
>> #define RX_DESC3_L34T_IPV4_TCP 1
>> #define RX_DESC3_L34T_IPV4_UDP 2
>>diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>>index d0b6f091f..013c6330d 100644
>>--- a/drivers/net/axgbe/axgbe_ethdev.c
>>+++ b/drivers/net/axgbe/axgbe_ethdev.c
>>@@ -249,6 +249,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
>> {
>> struct axgbe_port *pdata = dev->data->dev_private;
>> int ret;
>>+ struct rte_eth_dev_data *dev_data = dev->data;
>>+ uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
>>
>> PMD_INIT_FUNC_TRACE();
>>
>>@@ -279,6 +281,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
>>
>> axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
>> axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
>>+
>>+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
>>+ max_pkt_len > pdata->rx_buf_size)
>>+ dev_data->scattered_rx = 1;
>>+
>>+ /* Scatter Rx handling */
>>+ if (dev_data->scattered_rx)
>>+ dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
>>+ else
>>+ dev->rx_pkt_burst = &axgbe_recv_pkts;
>>+
>> return 0;
>> }
>>
>>@@ -789,6 +802,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> DEV_RX_OFFLOAD_IPV4_CKSUM |
>> DEV_RX_OFFLOAD_UDP_CKSUM |
>> DEV_RX_OFFLOAD_TCP_CKSUM |
>>+ DEV_RX_OFFLOAD_JUMBO_FRAME |
>>+ DEV_RX_OFFLOAD_SCATTER |
>> DEV_RX_OFFLOAD_KEEP_CRC;
>>
>> dev_info->tx_offload_capa =
>>@@ -1020,7 +1035,6 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
>> int ret;
>>
>> eth_dev->dev_ops = &axgbe_eth_dev_ops;
>>- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>>
>> /*
>> * For secondary processes, we don't initialise any further as primary
>>diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
>>index 96055c25b..8f818eb89 100644
>>--- a/drivers/net/axgbe/axgbe_rxtx.c
>>+++ b/drivers/net/axgbe/axgbe_rxtx.c
>>@@ -307,6 +307,151 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> return nb_rx;
>> }
>>
>>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>>+{
>>+ PMD_INIT_FUNC_TRACE();
>>+ uint16_t nb_rx = 0;
>>+ struct axgbe_rx_queue *rxq = rx_queue;
>>+ volatile union axgbe_rx_desc *desc;
>>+
>>+ uint64_t old_dirty = rxq->dirty;
>>+ struct rte_mbuf *first_seg = NULL;
>>+ struct rte_mbuf *mbuf, *tmbuf;
>>+ unsigned int err;
>>+ uint32_t error_status;
>>+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>>+
>>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>>+ while (nb_rx < nb_pkts) {
>>+ bool eop = 0;
>>+next_desc:
>>+ if (unlikely(idx == rxq->nb_desc))
>>+ idx = 0;
>>+
>>+ desc = &rxq->desc[idx];
>>+
>>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
>>+ break;
>>+
>>+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
>>+ if (unlikely(!tmbuf)) {
>>+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
>>+ " queue_id = %u\n",
>>+ (unsigned int)rxq->port_id,
>>+ (unsigned int)rxq->queue_id);
>>+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
>>+ break;
>>+ }
>>+
>>+ pidx = idx + 1;
>>+ if (unlikely(pidx == rxq->nb_desc))
>>+ pidx = 0;
>>+
>>+ rte_prefetch0(rxq->sw_ring[pidx]);
>>+ if ((pidx & 0x3) == 0) {
>>+ rte_prefetch0(&rxq->desc[pidx]);
>>+ rte_prefetch0(&rxq->sw_ring[pidx]);
>>+ }
>>+
>>+ mbuf = rxq->sw_ring[idx];
>>+ /* Check for any errors and free mbuf*/
>>+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
>>+ RX_NORMAL_DESC3, ES);
>>+ error_status = 0;
>>+ if (unlikely(err)) {
>>+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
>>+ if ((error_status != AXGBE_L3_CSUM_ERR)
>>+ && (error_status != AXGBE_L4_CSUM_ERR)) {
>>+ rxq->errors++;
>>+ rte_pktmbuf_free(mbuf);
>>+ goto err_set;
>>+ }
>>+ }
>>+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
>>+
>>+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
>>+ RX_NORMAL_DESC3, LD)) {
>>+ eop = 0;
>>+ pkt_len = rxq->buf_size;
>>+ data_len = pkt_len;
>>+ } else {
>>+ eop = 1;
>>+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
>>+ RX_NORMAL_DESC3, PL);
>>+ data_len = pkt_len - rxq->crc_len;
>>+ }
>>+
>>+ if (first_seg != NULL) {
>>+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
>>+ rte_mempool_put(rxq->mb_pool,
>>+ first_seg);
>>+ } else {
>>+ first_seg = mbuf;
>>+ }
>>+
>>+ /* Get the RSS hash */
>>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>>+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>>+
>>+ /* Mbuf populate */
>>+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>>+ mbuf->data_len = data_len;
>>+
>>+err_set:
>>+ rxq->cur++;
>>+ rxq->sw_ring[idx++] = tmbuf;
>>+ desc->read.baddr =
>>+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
>>+ memset((void *)(&desc->read.desc2), 0, 8);
>>+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
>>+ rxq->dirty++;
>>+
>>+ if (!eop) {
>>+ rte_pktmbuf_free(mbuf);
>>+ goto next_desc;
>>+ }
>>+
>>+ first_seg->pkt_len = pkt_len;
>>+ rxq->bytes += pkt_len;
>>+ mbuf->next = NULL;
>>+
>>+ first_seg->port = rxq->port_id;
>>+ if (rxq->pdata->rx_csum_enable) {
>>+ mbuf->ol_flags = 0;
>>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
>>+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
>>+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
>>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
>>+ } else if (unlikely(error_status
>>+ == AXGBE_L4_CSUM_ERR)) {
>>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
>>+ }
>>+ }
>>+
>>+ rx_pkts[nb_rx++] = first_seg;
>>+
>>+ /* Setup receipt context for a new packet.*/
>>+ first_seg = NULL;
>>+ }
>>+
>>+ /* Save receive context.*/
>>+ rxq->pkts += nb_rx;
>>+
>>+ if (rxq->dirty != old_dirty) {
>>+ rte_wmb();
>>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
>>+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
>>+ low32_value(rxq->ring_phys_addr +
>>+ (idx * sizeof(union axgbe_rx_desc))));
>>+ }
>>+ return nb_rx;
>>+}
>>+
>> /* Tx Apis */
>> static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
>> {
>>diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
>>index a21537df9..f6796b09b 100644
>>--- a/drivers/net/axgbe/axgbe_rxtx.h
>>+++ b/drivers/net/axgbe/axgbe_rxtx.h
>>@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>> int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>> uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> uint16_t nb_pkts);
>>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
>> uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
>> struct rte_mbuf **rx_pkts,
>> uint16_t nb_pkts);
>>--
>>2.17.1
>>
>
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-26 5:47 ` Kumar, Ravi1
@ 2020-03-05 10:20 ` Kumar, Ravi1
2020-03-05 10:27 ` Kumar, Ravi1
From: Kumar, Ravi1 @ 2020-03-05 10:20 UTC
To: Sebastian, Selwin, dev
Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>From: Selwin Sebastian <selwin.sebastian@amd.com>
>
>Enable scattered rx support and add jumbo packet receive capability
>
>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>---
> doc/guides/nics/features/axgbe.ini | 1 +
> drivers/net/axgbe/axgbe_common.h | 2 +
> drivers/net/axgbe/axgbe_ethdev.c | 16 +++-
> drivers/net/axgbe/axgbe_rxtx.c | 145 +++++++++++++++++++++++++++++
> drivers/net/axgbe/axgbe_rxtx.h | 2 +
> 5 files changed, 165 insertions(+), 1 deletion(-)
>
>diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
>index ab4da559f..0becaa097 100644
>--- a/doc/guides/nics/features/axgbe.ini
>+++ b/doc/guides/nics/features/axgbe.ini
>@@ -7,6 +7,7 @@
> Speed capabilities = Y
> Link status = Y
> Jumbo frame = Y
>+Scattered Rx = Y
> Promiscuous mode = Y
> Allmulticast mode = Y
> RSS hash = Y
>diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
>index fdb037dd5..fbd46150c 100644
>--- a/drivers/net/axgbe/axgbe_common.h
>+++ b/drivers/net/axgbe/axgbe_common.h
>@@ -1135,6 +1135,8 @@
> #define RX_NORMAL_DESC3_PL_WIDTH 14
> #define RX_NORMAL_DESC3_RSV_INDEX 26
> #define RX_NORMAL_DESC3_RSV_WIDTH 1
>+#define RX_NORMAL_DESC3_LD_INDEX 28
>+#define RX_NORMAL_DESC3_LD_WIDTH 1
>
> #define RX_DESC3_L34T_IPV4_TCP 1
> #define RX_DESC3_L34T_IPV4_UDP 2
>diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>index d0b6f091f..013c6330d 100644
>--- a/drivers/net/axgbe/axgbe_ethdev.c
>+++ b/drivers/net/axgbe/axgbe_ethdev.c
>@@ -249,6 +249,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
> {
> struct axgbe_port *pdata = dev->data->dev_private;
> int ret;
>+ struct rte_eth_dev_data *dev_data = dev->data;
>+ uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
>
> PMD_INIT_FUNC_TRACE();
>
>@@ -279,6 +281,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
>
> axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
> axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
>+
>+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
>+ max_pkt_len > pdata->rx_buf_size)
>+ dev_data->scattered_rx = 1;
>+
>+ /* Scatter Rx handling */
>+ if (dev_data->scattered_rx)
>+ dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
>+ else
>+ dev->rx_pkt_burst = &axgbe_recv_pkts;
>+
> return 0;
> }
>
>@@ -789,6 +802,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
>+ DEV_RX_OFFLOAD_JUMBO_FRAME |
>+ DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> dev_info->tx_offload_capa =
>@@ -1020,7 +1035,6 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
> int ret;
>
> eth_dev->dev_ops = &axgbe_eth_dev_ops;
>- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>
> /*
> * For secondary processes, we don't initialise any further as primary
>diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
>index 96055c25b..8f818eb89 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.c
>+++ b/drivers/net/axgbe/axgbe_rxtx.c
>@@ -307,6 +307,151 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> return nb_rx;
> }
>
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>+{
>+ PMD_INIT_FUNC_TRACE();
>+ uint16_t nb_rx = 0;
>+ struct axgbe_rx_queue *rxq = rx_queue;
>+ volatile union axgbe_rx_desc *desc;
>+
>+ uint64_t old_dirty = rxq->dirty;
>+ struct rte_mbuf *first_seg = NULL;
>+ struct rte_mbuf *mbuf, *tmbuf;
>+ unsigned int err;
>+ uint32_t error_status;
>+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>+
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>+ while (nb_rx < nb_pkts) {
>+ bool eop = 0;
>+next_desc:
>+ if (unlikely(idx == rxq->nb_desc))
>+ idx = 0;
>+
>+ desc = &rxq->desc[idx];
>+
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
>+ break;
>+
>+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
>+ if (unlikely(!tmbuf)) {
>+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
>+ " queue_id = %u\n",
>+ (unsigned int)rxq->port_id,
>+ (unsigned int)rxq->queue_id);
>+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
>+ break;
>+ }
>+
>+ pidx = idx + 1;
>+ if (unlikely(pidx == rxq->nb_desc))
>+ pidx = 0;
>+
>+ rte_prefetch0(rxq->sw_ring[pidx]);
>+ if ((pidx & 0x3) == 0) {
>+ rte_prefetch0(&rxq->desc[pidx]);
>+ rte_prefetch0(&rxq->sw_ring[pidx]);
>+ }
>+
>+ mbuf = rxq->sw_ring[idx];
>+ /* Check for any errors and free mbuf*/
>+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, ES);
>+ error_status = 0;
>+ if (unlikely(err)) {
>+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
>+ if ((error_status != AXGBE_L3_CSUM_ERR)
>+ && (error_status != AXGBE_L4_CSUM_ERR)) {
>+ rxq->errors++;
>+ rte_pktmbuf_free(mbuf);
>+ goto err_set;
>+ }
>+ }
>+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
>+
>+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, LD)) {
>+ eop = 0;
>+ pkt_len = rxq->buf_size;
>+ data_len = pkt_len;
>+ } else {
>+ eop = 1;
>+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, PL);
>+ data_len = pkt_len - rxq->crc_len;
>+ }
>+
>+ if (first_seg != NULL) {
>+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
>+ rte_mempool_put(rxq->mb_pool,
>+ first_seg);
>+ } else {
>+ first_seg = mbuf;
>+ }
>+
>+ /* Get the RSS hash */
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>+
>+ /* Mbuf populate */
>+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>+ mbuf->data_len = data_len;
>+
>+err_set:
>+ rxq->cur++;
>+ rxq->sw_ring[idx++] = tmbuf;
>+ desc->read.baddr =
>+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
>+ memset((void *)(&desc->read.desc2), 0, 8);
>+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
>+ rxq->dirty++;
>+
>+ if (!eop) {
>+ rte_pktmbuf_free(mbuf);
>+ goto next_desc;
>+ }
>+
>+ first_seg->pkt_len = pkt_len;
>+ rxq->bytes += pkt_len;
>+ mbuf->next = NULL;
>+
>+ first_seg->port = rxq->port_id;
>+ if (rxq->pdata->rx_csum_enable) {
>+ mbuf->ol_flags = 0;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
>+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
>+ } else if (unlikely(error_status
>+ == AXGBE_L4_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
>+ }
>+ }
>+
>+ rx_pkts[nb_rx++] = first_seg;
>+
>+ /* Setup receipt context for a new packet.*/
>+ first_seg = NULL;
>+ }
>+
>+ /* Save receive context.*/
>+ rxq->pkts += nb_rx;
>+
>+ if (rxq->dirty != old_dirty) {
>+ rte_wmb();
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
>+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
>+ low32_value(rxq->ring_phys_addr +
>+ (idx * sizeof(union axgbe_rx_desc))));
>+ }
>+ return nb_rx;
>+}
>+
> /* Tx Apis */
> static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
> {
>diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
>index a21537df9..f6796b09b 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.h
>+++ b/drivers/net/axgbe/axgbe_rxtx.h
>@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>--
>2.17.1
>
* Re: [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
2020-02-25 12:40 ssebasti
@ 2020-02-26 5:47 ` Kumar, Ravi1
2020-03-05 10:20 ` Kumar, Ravi1
From: Kumar, Ravi1 @ 2020-02-26 5:47 UTC
To: Sebastian, Selwin, dev
Acked-by: Ravi Kumar <ravi1.kumar@amd.com>
>
>
>-----Original Message-----
>From: Sebastian, Selwin <Selwin.Sebastian@amd.com>
>Sent: Tuesday, February 25, 2020 6:10 PM
>To: dev@dpdk.org
>Cc: Kumar, Ravi1 <Ravi1.Kumar@amd.com>
>Subject: [PATCH v1] net/axgbe: add support for Scattered Rx
>
>From: Selwin Sebastian <selwin.sebastian@amd.com>
>
>Enable scattered rx support and add jumbo packet transmit capability
>
>Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
>---
> doc/guides/nics/features/axgbe.ini | 1 +
> drivers/net/axgbe/axgbe_common.h | 2 +
> drivers/net/axgbe/axgbe_ethdev.c | 18 +++-
> drivers/net/axgbe/axgbe_rxtx.c | 146 +++++++++++++++++++++++++++++
> drivers/net/axgbe/axgbe_rxtx.h | 2 +
> 5 files changed, 168 insertions(+), 1 deletion(-)
>
>diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
>index ab4da559f..0becaa097 100644
>--- a/doc/guides/nics/features/axgbe.ini
>+++ b/doc/guides/nics/features/axgbe.ini
>@@ -7,6 +7,7 @@
> Speed capabilities = Y
> Link status = Y
> Jumbo frame = Y
>+Scattered Rx = Y
> Promiscuous mode = Y
> Allmulticast mode = Y
> RSS hash = Y
>diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
>index fdb037dd5..fbd46150c 100644
>--- a/drivers/net/axgbe/axgbe_common.h
>+++ b/drivers/net/axgbe/axgbe_common.h
>@@ -1135,6 +1135,8 @@
> #define RX_NORMAL_DESC3_PL_WIDTH 14
> #define RX_NORMAL_DESC3_RSV_INDEX 26
> #define RX_NORMAL_DESC3_RSV_WIDTH 1
>+#define RX_NORMAL_DESC3_LD_INDEX 28
>+#define RX_NORMAL_DESC3_LD_WIDTH 1
>
> #define RX_DESC3_L34T_IPV4_TCP 1
> #define RX_DESC3_L34T_IPV4_UDP 2
>diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>index d0b6f091f..eb2f51f89 100644
>--- a/drivers/net/axgbe/axgbe_ethdev.c
>+++ b/drivers/net/axgbe/axgbe_ethdev.c
>@@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> DEV_RX_OFFLOAD_IPV4_CKSUM |
> DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM |
>+ DEV_RX_OFFLOAD_JUMBO_FRAME |
>+ DEV_RX_OFFLOAD_SCATTER |
> DEV_RX_OFFLOAD_KEEP_CRC;
>
> dev_info->tx_offload_capa =
> DEV_TX_OFFLOAD_IPV4_CKSUM |
> DEV_TX_OFFLOAD_UDP_CKSUM |
>+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
>+ DEV_TX_OFFLOAD_UDP_TSO |
>+ DEV_TX_OFFLOAD_SCTP_CKSUM |
>+ DEV_TX_OFFLOAD_MULTI_SEGS |
> DEV_TX_OFFLOAD_TCP_CKSUM;
>
> if (pdata->hw_feat.rss) {
>@@ -1018,9 +1024,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
> struct rte_pci_device *pci_dev;
> uint32_t reg, mac_lo, mac_hi;
> int ret;
>+ struct rte_eth_dev_info dev_info = { 0 };
>
> eth_dev->dev_ops = &axgbe_eth_dev_ops;
>- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>+ eth_dev->dev_ops->dev_infos_get(eth_dev, &dev_info);
>+
>+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
>+ eth_dev->data->scattered_rx = 1;
>+
>+ /* Scatter Rx handling */
>+ if (eth_dev->data->scattered_rx)
>+ eth_dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
>+ else
>+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
>
> /*
> * For secondary processes, we don't initialise any further as primary
>diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
>index 96055c25b..57e2bbb34 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.c
>+++ b/drivers/net/axgbe/axgbe_rxtx.c
>@@ -307,6 +307,152 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> return nb_rx;
> }
>
>+
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>+{
>+ PMD_INIT_FUNC_TRACE();
>+ uint16_t nb_rx = 0;
>+ struct axgbe_rx_queue *rxq = rx_queue;
>+ volatile union axgbe_rx_desc *desc;
>+
>+ uint64_t old_dirty = rxq->dirty;
>+ struct rte_mbuf *first_seg = NULL;
>+ struct rte_mbuf *mbuf, *tmbuf;
>+ unsigned int err;
>+ uint32_t error_status;
>+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>+
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>+ while (nb_rx < nb_pkts) {
>+ bool eop = 0;
>+next_desc:
>+ if (unlikely(idx == rxq->nb_desc))
>+ idx = 0;
>+
>+ desc = &rxq->desc[idx];
>+
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
>+ break;
>+
>+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
>+ if (unlikely(!tmbuf)) {
>+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
>+ " queue_id = %u\n",
>+ (unsigned int)rxq->port_id,
>+ (unsigned int)rxq->queue_id);
>+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
>+ break;
>+ }
>+
>+ pidx = idx + 1;
>+ if (unlikely(pidx == rxq->nb_desc))
>+ pidx = 0;
>+
>+ rte_prefetch0(rxq->sw_ring[pidx]);
>+ if ((pidx & 0x3) == 0) {
>+ rte_prefetch0(&rxq->desc[pidx]);
>+ rte_prefetch0(&rxq->sw_ring[pidx]);
>+ }
>+
>+ mbuf = rxq->sw_ring[idx];
>+ /* Check for any errors and free mbuf*/
>+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, ES);
>+ error_status = 0;
>+ if (unlikely(err)) {
>+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
>+ if ((error_status != AXGBE_L3_CSUM_ERR)
>+ && (error_status != AXGBE_L4_CSUM_ERR)) {
>+ rxq->errors++;
>+ rte_pktmbuf_free(mbuf);
>+ goto err_set;
>+ }
>+ }
>+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
>+
>+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, LD)) {
>+ eop = 0;
>+ pkt_len = rxq->buf_size;
>+ data_len = pkt_len;
>+ } else {
>+ eop = 1;
>+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
>+ RX_NORMAL_DESC3, PL);
>+ data_len = pkt_len - rxq->crc_len;
>+ }
>+
>+ if (first_seg != NULL) {
>+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
>+ rte_mempool_put(rxq->mb_pool,
>+ first_seg);
>+ } else {
>+ first_seg = mbuf;
>+ }
>+
>+ /* Get the RSS hash */
>+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
>+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>+
>+ /* Mbuf populate */
>+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>+ mbuf->data_len = data_len;
>+
>+err_set:
>+ rxq->cur++;
>+ rxq->sw_ring[idx++] = tmbuf;
>+ desc->read.baddr =
>+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
>+ memset((void *)(&desc->read.desc2), 0, 8);
>+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
>+ rxq->dirty++;
>+
>+ if (!eop) {
>+ rte_pktmbuf_free(mbuf);
>+ goto next_desc;
>+ }
>+
>+ first_seg->pkt_len = pkt_len;
>+ rxq->bytes += pkt_len;
>+ mbuf->next = NULL;
>+
>+ first_seg->port = rxq->port_id;
>+ if (rxq->pdata->rx_csum_enable) {
>+ mbuf->ol_flags = 0;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
>+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
>+ } else if (unlikely(error_status
>+ == AXGBE_L4_CSUM_ERR)) {
>+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
>+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
>+ }
>+ }
>+
>+ rx_pkts[nb_rx++] = first_seg;
>+
>+ /* Setup receipt context for a new packet.*/
>+ first_seg = NULL;
>+ }
>+
>+ /* Save receive context.*/
>+ rxq->pkts += nb_rx;
>+
>+ if (rxq->dirty != old_dirty) {
>+ rte_wmb();
>+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
>+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
>+ low32_value(rxq->ring_phys_addr +
>+ (idx * sizeof(union axgbe_rx_desc))));
>+ }
>+ return nb_rx;
>+}
>+
> /* Tx Apis */
> static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
> {
>diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
>index a21537df9..f6796b09b 100644
>--- a/drivers/net/axgbe/axgbe_rxtx.h
>+++ b/drivers/net/axgbe/axgbe_rxtx.h
>@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
>--
>2.17.1
>
* [dpdk-dev] [PATCH v1] net/axgbe: add support for Scattered Rx
@ 2020-02-25 12:40 ssebasti
2020-02-26 5:47 ` Kumar, Ravi1
From: ssebasti @ 2020-02-25 12:40 UTC
To: dev; +Cc: Ravi1.Kumar
From: Selwin Sebastian <selwin.sebastian@amd.com>
Enable scattered rx support and add jumbo packet transmit capability
Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
---
doc/guides/nics/features/axgbe.ini | 1 +
drivers/net/axgbe/axgbe_common.h | 2 +
drivers/net/axgbe/axgbe_ethdev.c | 18 +++-
drivers/net/axgbe/axgbe_rxtx.c | 146 +++++++++++++++++++++++++++++
drivers/net/axgbe/axgbe_rxtx.h | 2 +
5 files changed, 168 insertions(+), 1 deletion(-)
diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index ab4da559f..0becaa097 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Jumbo frame = Y
+Scattered Rx = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index fdb037dd5..fbd46150c 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -1135,6 +1135,8 @@
#define RX_NORMAL_DESC3_PL_WIDTH 14
#define RX_NORMAL_DESC3_RSV_INDEX 26
#define RX_NORMAL_DESC3_RSV_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index d0b6f091f..eb2f51f89 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -789,11 +789,17 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_TSO |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
@@ -1018,9 +1024,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
uint32_t reg, mac_lo, mac_hi;
int ret;
+ struct rte_eth_dev_info dev_info = { 0 };
eth_dev->dev_ops = &axgbe_eth_dev_ops;
- eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+ eth_dev->dev_ops->dev_infos_get(eth_dev, &dev_info);
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+
+ /* Scatter Rx handling */
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
+ else
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
/*
* For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 96055c25b..57e2bbb34 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -307,6 +307,152 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
+
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *first_seg = NULL;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ bool eop = 0;
+next_desc:
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for any errors and free mbuf*/
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR)
+ && (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+
+ if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, LD)) {
+ eop = 0;
+ pkt_len = rxq->buf_size;
+ data_len = pkt_len;
+ } else {
+ eop = 1;
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, PL);
+ data_len = pkt_len - rxq->crc_len;
+ }
+
+ if (first_seg != NULL) {
+ if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
+ rte_mempool_put(rxq->mb_pool,
+ first_seg);
+ } else {
+ first_seg = mbuf;
+ }
+
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+
+ /* Mbuf populate */
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->data_len = data_len;
+
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+
+ if (!eop) {
+ rte_pktmbuf_free(mbuf);
+ goto next_desc;
+ }
+
+ first_seg->pkt_len = pkt_len;
+ rxq->bytes += pkt_len;
+ mbuf->next = NULL;
+
+ first_seg->port = rxq->port_id;
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (unlikely(error_status
+ == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ rx_pkts[nb_rx++] = first_seg;
+
+ /* Setup receipt context for a new packet.*/
+ first_seg = NULL;
+ }
+
+ /* Save receive context.*/
+ rxq->pkts += nb_rx;
+
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+ return nb_rx;
+}
+
/* Tx Apis */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index a21537df9..f6796b09b 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
--
2.17.1