From: Leyi Rong <leyi.rong@intel.com>
To: qi.z.zhang@intel.com, xiaolong.ye@intel.com,
haiyue.wang@intel.com, wenzhuo.lu@intel.com
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/6] net/ice: handle the Rx flex descriptor
Date: Thu, 29 Aug 2019 10:34:17 +0800 [thread overview]
Message-ID: <20190829023421.112551-3-leyi.rong@intel.com> (raw)
In-Reply-To: <20190829023421.112551-1-leyi.rong@intel.com>
From: Haiyue Wang <haiyue.wang@intel.com>
Set the RXDID to the flex descriptor type by default, and change the
Rx functions to support the new descriptor format.
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
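Note: with the flex descriptor, write-back status moves from the legacy
64-bit qword1 into dedicated little-endian 16-bit words, so each status
check below becomes a single 16-bit load. A simplified sketch of the
write-back fields this patch consumes (an illustration abbreviated from
the comms profile, not the hardware definition; the real
union ice_rx_flex_desc carries more fields such as timestamps and
flow ID):

#include <stdint.h>

/* Qwords 0-1 of the 32-byte flex write-back, simplified. */
struct rx_flex_wb_sketch {
	uint8_t  rxdid;              /* descriptor profile ID */
	uint8_t  mir_id_umb_cast;
	uint16_t ptype_flex_flags0;  /* 10-bit packet type plus flags */
	uint16_t pkt_len;            /* packet buffer length */
	uint16_t hdr_len_sph_flex_flags1;
	uint16_t status_error0;      /* DD, EOF, L3L4P, checksum errors */
	uint16_t l2tag1;             /* stripped VLAN tag */
	uint32_t rss_hash;           /* RSS hash in the comms profile */
};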
drivers/net/ice/ice_rxtx.c | 262 +++++++++++++++++--------------------
1 file changed, 121 insertions(+), 141 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 81af81441..3b4ccf151 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -13,7 +13,6 @@
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
-#define ICE_RX_ERR_BITS 0x3f
static enum ice_status
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
@@ -25,18 +24,9 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
enum ice_status err;
uint16_t buf_size, len;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
uint32_t regval;
- /**
- * The kernel driver uses flex descriptor. It sets the register
- * to flex descriptor mode.
- * DPDK uses legacy descriptor. It should set the register back
- * to the default value, then uses legacy descriptor mode.
- */
- regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
- ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
-
/* Set buffer size as the head split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
@@ -94,6 +84,21 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
rx_ctx.showiv = 0;
rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+ /* Enable Flexible Descriptors in the queue context, which
+ * allows this driver to select a specific receive descriptor format
+ */
+ regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increase context priority to pick up profile ID; default is 0x01;
+ * setting to 0x03 ensures the profile is programmed even if the
+ * previous context has the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
+
err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
if (err) {
PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
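For reference, the QRXFLXP_CNTXT value written above packs the RXDID
profile index and a 3-bit arbitration priority into one 32-bit word.
A standalone sketch of the composition; the shift/mask values and the
profile ID of 16 for the generic comms RXDID are assumptions mirroring
the ice hardware headers, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define RXDID_IDX_S  0                      /* assumed: index in bits 5:0 */
#define RXDID_IDX_M  (0x3Fu << RXDID_IDX_S)
#define RXDID_PRIO_S 8                      /* assumed: prio in bits 10:8 */
#define RXDID_PRIO_M (0x7u << RXDID_PRIO_S)

static uint32_t make_qrxflxp_cntxt(uint32_t rxdid, uint32_t prio)
{
	return ((rxdid << RXDID_IDX_S) & RXDID_IDX_M) |
	       ((prio << RXDID_PRIO_S) & RXDID_PRIO_M);
}

int main(void)
{
	printf("regval = 0x%03x\n", make_qrxflxp_cntxt(16, 3)); /* 0x310 */
	return 0;
}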
@@ -127,7 +132,6 @@ static int
ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
{
struct ice_rx_entry *rxe = rxq->sw_ring;
- uint64_t dma_addr;
uint16_t i;
for (i = 0; i < rxq->nb_rx_desc; i++) {
@@ -145,11 +149,9 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
- dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
-
rxd = &rxq->rx_ring[i];
- rxd->read.pkt_addr = dma_addr;
+ rxd->read.pkt_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
rxd->read.rsvd1 = 0;
@@ -961,16 +963,15 @@ uint32_t
ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define ICE_RXQ_SCAN_INTERVAL 4
- volatile union ice_rx_desc *rxdp;
+ volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_queue *rxq;
uint16_t desc = 0;
rxq = dev->data->rx_queues[rx_queue_id];
- rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
while ((desc < rxq->nb_rx_desc) &&
- ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
- ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
- (1 << ICE_RX_DESC_STATUS_DD_S)) {
+ rte_le_to_cpu_16(rxdp->wb.status_error0) &
+ (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
/**
* Check the DD bit of every 4th Rx descriptor in the group, to
* avoid polling too frequently and degrading performance
@@ -979,79 +980,77 @@ ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
desc += ICE_RXQ_SCAN_INTERVAL;
rxdp += ICE_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
- rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ rxdp = (volatile union ice_rx_flex_desc *)
+ &(rxq->rx_ring[rxq->rx_tail +
desc - rxq->nb_rx_desc]);
}
return desc;
}
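The interval scan above exploits in-order completion: if the DD bit of
every 4th descriptor is set, the descriptors in between must be done as
well, so the ring is probed at a quarter of the cost and the count is
reported at ICE_RXQ_SCAN_INTERVAL granularity. A minimal model of the
idea (the DD bit position and ring contents are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define SCAN_INTERVAL 4
#define DD_BIT        (1u << 0) /* assumed: DD at bit 0 of status_error0 */

static uint16_t count_done(const uint16_t *status, uint16_t nb_desc,
			   uint16_t tail)
{
	uint16_t desc = 0;

	/* A set DD bit proves all earlier descriptors completed too. */
	while (desc < nb_desc &&
	       (status[(tail + desc) % nb_desc] & DD_BIT))
		desc += SCAN_INTERVAL;
	return desc;
}

int main(void)
{
	uint16_t ring[16] = { 1, 1, 1, 1 }; /* first 4 descriptors done */

	printf("%u done\n", count_done(ring, 16, 0)); /* prints "4 done" */
	return 0;
}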
-/* Translate the rx descriptor status to pkt flags */
-static inline uint64_t
-ice_rxd_status_to_pkt_flags(uint64_t qword)
-{
- uint64_t flags;
-
- /* Check if RSS_HASH */
- flags = (((qword >> ICE_RX_DESC_STATUS_FLTSTAT_S) &
- ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
- ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
-
- return flags;
-}
+#define ICE_RX_FLEX_ERR0_BITS \
+ ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
+ (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
+ (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
+ (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
+ (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
+ (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
/* Rx L3/L4 checksum */
static inline uint64_t
-ice_rxd_error_to_pkt_flags(uint64_t qword)
+ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
{
uint64_t flags = 0;
- uint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);
- if (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {
+ /* check if HW has decoded the packet and checksum */
+ if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
+ return 0;
+
+ if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
return flags;
}
- if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))
+ if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
flags |= PKT_RX_IP_CKSUM_BAD;
else
flags |= PKT_RX_IP_CKSUM_GOOD;
- if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))
+ if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
flags |= PKT_RX_L4_CKSUM_BAD;
else
flags |= PKT_RX_L4_CKSUM_GOOD;
- if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))
+ if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
flags |= PKT_RX_EIP_CKSUM_BAD;
return flags;
}
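The new L3L4P gate lets the function return early for packets the
hardware never parsed, instead of reporting a misleading good or bad
checksum. A driver-independent toy with the same control flow (the bit
positions are placeholders for the ICE_RX_FLEX_DESC_STATUS0_* values):

#include <stdint.h>
#include <stdio.h>

enum {                   /* placeholder bit assignments */
	L3L4P = 1u << 3, /* HW parsed the L3/L4 headers */
	IPE   = 1u << 4, /* IP checksum error */
	L4E   = 1u << 5, /* L4 checksum error */
};

static const char *csum_verdict(uint16_t st)
{
	if (!(st & L3L4P))
		return "unknown: HW did not parse L3/L4";
	if (st & IPE)
		return "IP checksum bad";
	if (st & L4E)
		return "L4 checksum bad";
	return "checksums good";
}

int main(void)
{
	printf("%s\n", csum_verdict(L3L4P));       /* checksums good */
	printf("%s\n", csum_verdict(L3L4P | L4E)); /* L4 checksum bad */
	printf("%s\n", csum_verdict(0));           /* unknown */
	return 0;
}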
static inline void
-ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
+ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
{
- if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
- (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {
+ if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
+ (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->vlan_tci =
- rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ rte_le_to_cpu_16(rxdp->wb.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
- rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
+ rte_le_to_cpu_16(rxdp->wb.l2tag1));
} else {
mb->vlan_tci = 0;
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
- (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {
+ if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+ (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
- mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
- rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
- rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
+ rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+ rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
} else {
mb->vlan_tci_outer = 0;
}
@@ -1060,6 +1059,21 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
mb->vlan_tci, mb->vlan_tci_outer);
}
+static inline void
+ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+}
+
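Compared with the deleted ice_rxd_status_to_pkt_flags(), which decoded
the two legacy FLTSTAT bits, the comms profile gives the RSS hash its
own dword guarded by a single valid bit. A self-contained sketch of
that extraction (the struct layout and bit position are simplified
stand-ins, not the driver definitions):

#include <stdint.h>

#define RSS_VALID (1u << 12) /* stand-in for STATUS0_RSS_VALID_S */

struct wb_sketch {
	uint16_t status_error0;
	uint32_t rss_hash;
};

/* Return the RSS hash; *valid reports whether HW computed one. */
static uint32_t get_rss(const struct wb_sketch *d, int *valid)
{
	*valid = !!(d->status_error0 & RSS_VALID);
	return *valid ? d->rss_hash : 0;
}

int main(void)
{
	struct wb_sketch d = { RSS_VALID, 0xdeadbeef };
	int ok;

	return get_rss(&d, &ok) == 0xdeadbeef && ok ? 0 : 1;
}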
#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
#define ICE_LOOK_AHEAD 8
#if (ICE_LOOK_AHEAD != 8)
@@ -1068,25 +1082,23 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
static inline int
ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
{
- volatile union ice_rx_desc *rxdp;
+ volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_entry *rxep;
struct rte_mbuf *mb;
+ uint16_t stat_err0;
uint16_t pkt_len;
- uint64_t qword1;
- uint32_t rx_status;
int32_t s[ICE_LOOK_AHEAD], nb_dd;
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
- qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
- rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S;
+ stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
/* Make sure there is at least 1 packet to receive */
- if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
+ if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
/**
@@ -1096,42 +1108,31 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
/* Read desc statuses backwards to avoid race condition */
- for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) {
- qword1 = rte_le_to_cpu_64(
- rxdp[j].wb.qword1.status_error_len);
- s[j] = (qword1 & ICE_RXD_QW1_STATUS_M) >>
- ICE_RXD_QW1_STATUS_S;
- }
+ for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
+ s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
rte_smp_rmb();
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
- nb_dd += s[j] & (1 << ICE_RX_DESC_STATUS_DD_S);
+ nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
nb_rx += nb_dd;
/* Translate descriptor info to mbuf parameters */
for (j = 0; j < nb_dd; j++) {
mb = rxep[j].mbuf;
- qword1 = rte_le_to_cpu_64(
- rxdp[j].wb.qword1.status_error_len);
- pkt_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
- ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
+ pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
mb->ol_flags = 0;
- pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
- pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
- if (pkt_flags & PKT_RX_RSS_HASH)
- mb->hash.rss =
- rte_le_to_cpu_32(
- rxdp[j].wb.qword0.hi_dword.rss);
- mb->packet_type = ptype_tbl[(uint8_t)(
- (qword1 &
- ICE_RXD_QW1_PTYPE_M) >>
- ICE_RXD_QW1_PTYPE_S)];
+ stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+ pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
+ mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
+ ice_rxd_to_pkt_fields(mb, &rxdp[j]);
mb->ol_flags |= pkt_flags;
}
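Reading the eight statuses back to front, together with the read
barrier, ensures a later descriptor is never observed done while an
earlier one still appears in flight; with in-order completion the DD
bits then form a prefix, so summing them yields the length of the
ready run. A compact model of the counting step (DD assumed at bit 0):

#include <stdint.h>
#include <stdio.h>

#define LOOK_AHEAD 8
#define DD_BIT     1u /* assumed: DD at bit 0 of status_error0 */

static int count_dd(const uint16_t s[LOOK_AHEAD])
{
	int j, nb_dd = 0;

	/* DD bits form a prefix, so their sum equals the run length. */
	for (j = 0; j < LOOK_AHEAD; j++)
		nb_dd += s[j] & DD_BIT;
	return nb_dd;
}

int main(void)
{
	uint16_t s[LOOK_AHEAD] = { 1, 1, 1, 0, 0, 0, 0, 0 };

	printf("%d descriptors ready\n", count_dd(s)); /* 3 */
	return 0;
}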
@@ -1180,7 +1181,6 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
struct ice_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t alloc_idx, i;
- uint64_t dma_addr;
int diag;
/* Allocate buffers in bulk */
@@ -1206,9 +1206,10 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
mb->port = rxq->port_id;
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+
+ rxdp[i].read.pkt_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
- rxdp[i].read.pkt_addr = dma_addr;
}
/* Update rx tail register */
@@ -1312,8 +1313,8 @@ ice_recv_scattered_pkts(void *rx_queue,
{
struct ice_rx_queue *rxq = rx_queue;
volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
- volatile union ice_rx_desc *rxdp;
- union ice_rx_desc rxd;
+ volatile union ice_rx_flex_desc *rxdp;
+ union ice_rx_flex_desc rxd;
struct ice_rx_entry *sw_ring = rxq->sw_ring;
struct ice_rx_entry *rxe;
struct rte_mbuf *first_seg = rxq->pkt_first_seg;
@@ -1324,21 +1325,17 @@ ice_recv_scattered_pkts(void *rx_queue,
uint16_t nb_rx = 0;
uint16_t nb_hold = 0;
uint16_t rx_packet_len;
- uint32_t rx_status;
- uint64_t qword1;
- uint64_t dma_addr;
- uint64_t pkt_flags = 0;
+ uint16_t rx_stat_err0;
+ uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
struct rte_eth_dev *dev;
while (nb_rx < nb_pkts) {
- rxdp = &rx_ring[rx_id];
- qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
- rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
- ICE_RXD_QW1_STATUS_S;
+ rxdp = (volatile union ice_rx_flex_desc *)&rx_ring[rx_id];
+ rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
/* Check the DD bit first */
- if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
+ if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
break;
/* allocate mbuf */
@@ -1371,20 +1368,16 @@ ice_recv_scattered_pkts(void *rx_queue,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/* Set data buffer address and data length of the mbuf */
+ rxdp->read.pkt_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
- rxdp->read.pkt_addr = dma_addr;
- rx_packet_len = (qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
- ICE_RXD_QW1_LEN_PBUF_S;
+
+ rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
rxm->data_len = rx_packet_len;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
- ice_rxd_to_vlan_tci(rxm, rxdp);
- rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
- ICE_RXD_QW1_PTYPE_M) >>
- ICE_RXD_QW1_PTYPE_S)];
/**
* If this is the first buffer of the received packet, set the
@@ -1410,7 +1403,7 @@ ice_recv_scattered_pkts(void *rx_queue,
* update the pointer to the last mbuf of the current scattered
* packet and continue to parse the RX ring.
*/
- if (!(rx_status & (1 << ICE_RX_DESC_STATUS_EOF_S))) {
+ if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
last_seg = rxm;
continue;
}
@@ -1442,13 +1435,11 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->port = rxq->port_id;
first_seg->ol_flags = 0;
-
- pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
- pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
- if (pkt_flags & PKT_RX_RSS_HASH)
- first_seg->hash.rss =
- rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
-
+ first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+ ice_rxd_to_vlan_tci(first_seg, &rxd);
+ ice_rxd_to_pkt_fields(first_seg, &rxd);
+ pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -1538,9 +1529,8 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
int
ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
+ volatile union ice_rx_flex_desc *rxdp;
struct ice_rx_queue *rxq = rx_queue;
- volatile uint64_t *status;
- uint64_t mask;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
@@ -1553,10 +1543,9 @@ ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
if (desc >= rxq->nb_rx_desc)
desc -= rxq->nb_rx_desc;
- status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
- mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
- ICE_RXD_QW1_STATUS_S);
- if (*status & mask)
+ rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[desc];
+ if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
+ (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
return RTE_ETH_RX_DESC_DONE;
return RTE_ETH_RX_DESC_AVAIL;
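Applications reach this callback through the ethdev layer rather than
by calling the PMD directly; a typical polling snippet (the port,
queue, and offset values are made up for the example):

#include <rte_ethdev.h>

/* Check whether the descriptor 32 slots ahead of the next one to be
 * processed has already been written back; example port 0, queue 0.
 */
static int rx_backlog_at_least_32(void)
{
	return rte_eth_rx_descriptor_status(0, 0, 32) ==
	       RTE_ETH_RX_DESC_DONE;
}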
@@ -1642,8 +1631,8 @@ ice_recv_pkts(void *rx_queue,
{
struct ice_rx_queue *rxq = rx_queue;
volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
- volatile union ice_rx_desc *rxdp;
- union ice_rx_desc rxd;
+ volatile union ice_rx_flex_desc *rxdp;
+ union ice_rx_flex_desc rxd;
struct ice_rx_entry *sw_ring = rxq->sw_ring;
struct ice_rx_entry *rxe;
struct rte_mbuf *nmb; /* new allocated mbuf */
@@ -1652,21 +1641,17 @@ ice_recv_pkts(void *rx_queue,
uint16_t nb_rx = 0;
uint16_t nb_hold = 0;
uint16_t rx_packet_len;
- uint32_t rx_status;
- uint64_t qword1;
- uint64_t dma_addr;
- uint64_t pkt_flags = 0;
+ uint16_t rx_stat_err0;
+ uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
struct rte_eth_dev *dev;
while (nb_rx < nb_pkts) {
- rxdp = &rx_ring[rx_id];
- qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
- rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
- ICE_RXD_QW1_STATUS_S;
+ rxdp = (volatile union ice_rx_flex_desc *)&rx_ring[rx_id];
+ rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
/* Check the DD bit first */
- if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
+ if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
break;
/* allocate mbuf */
@@ -1685,19 +1670,18 @@ ice_recv_pkts(void *rx_queue,
rx_id = 0;
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/**
* fill the read format of the descriptor with the physical address
* of the newly allocated mbuf: nmb
*/
+ rxdp->read.pkt_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
- rxdp->read.pkt_addr = dma_addr;
/* calculate rx_packet_len of the received pkt */
- rx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
- ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
+ rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
/* fill old mbuf with received descriptor: rxd */
rxm->data_off = RTE_PKTMBUF_HEADROOM;
@@ -1707,15 +1691,11 @@ ice_recv_pkts(void *rx_queue,
rxm->pkt_len = rx_packet_len;
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
- ice_rxd_to_vlan_tci(rxm, rxdp);
- rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
- ICE_RXD_QW1_PTYPE_M) >>
- ICE_RXD_QW1_PTYPE_S)];
- pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
- pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
- if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->hash.rss =
- rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+ ice_rxd_to_vlan_tci(rxm, &rxd);
+ ice_rxd_to_pkt_fields(rxm, &rxd);
+ pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
--
2.17.1
Thread overview: 54+ messages
2019-08-29 2:34 [dpdk-dev] [PATCH 0/6] enable Rx flexible descriptor Leyi Rong
2019-08-29 2:34 ` [dpdk-dev] [PATCH 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 0/6] enable Rx flexible descriptor Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 4/6] net/ice: support more ptype Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 5/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-08-29 8:04 ` [dpdk-dev] [PATCH v2 6/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-08-29 23:31 ` Zhang, Qi Z
2019-08-30 1:05 ` Wang, Haiyue
2019-08-30 1:06 ` Zhang, Qi Z
2019-08-30 6:17 ` Rong, Leyi
2019-08-29 2:34 ` Leyi Rong [this message]
2019-08-29 2:34 ` [dpdk-dev] [PATCH 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-08-29 2:34 ` [dpdk-dev] [PATCH 4/6] net/ice: support more ptype Leyi Rong
2019-08-29 2:34 ` [dpdk-dev] [PATCH 5/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-08-29 2:34 ` [dpdk-dev] [PATCH 6/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-17 8:53 ` [dpdk-dev] [PATCH v3 0/5] enable Rx flexible descriptor Leyi Rong
2019-09-17 8:53 ` [dpdk-dev] [PATCH v3 1/5] net/ice: add Rx flex descriptor definition Leyi Rong
2019-09-18 21:56 ` Ye Xiaolong
2019-09-17 8:53 ` [dpdk-dev] [PATCH v3 2/5] net/ice: handle the Rx flex descriptor Leyi Rong
2019-09-17 8:53 ` [dpdk-dev] [PATCH v3 3/5] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-09-18 23:30 ` Ye Xiaolong
2019-09-19 1:36 ` Wang, Haiyue
2019-09-19 1:44 ` Wang, Haiyue
2019-09-17 8:53 ` [dpdk-dev] [PATCH v3 4/5] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-09-17 8:53 ` [dpdk-dev] [PATCH v3 5/5] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 0/6] enable Rx flexible descriptor Leyi Rong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-09-23 11:05 ` Ye Xiaolong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-09-23 3:25 ` Yang, Qiming
2019-09-23 3:34 ` Wang, Haiyue
2019-09-23 8:29 ` Ye Xiaolong
2019-09-23 11:03 ` Wang, Haiyue
2019-09-23 14:24 ` Ye Xiaolong
2019-09-23 15:00 ` Wang, Haiyue
2019-09-23 15:55 ` Ye Xiaolong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 4/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 5/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-19 6:25 ` [dpdk-dev] [PATCH v4 6/6] net/ice: remove Rx legacy descriptor definition Leyi Rong
2019-09-23 14:31 ` Ye Xiaolong
2019-09-19 6:38 ` [dpdk-dev] [PATCH v4 0/6] enable Rx flexible descriptor Zhang, Qi Z
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 " Leyi Rong
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-09-24 9:02 ` Ye Xiaolong
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 4/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 5/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-24 2:38 ` [dpdk-dev] [PATCH v5 6/6] net/ice: remove Rx legacy descriptor definition Leyi Rong
2019-09-24 9:05 ` [dpdk-dev] [PATCH v5 0/6] enable Rx flexible descriptor Ye Xiaolong