From: Wei Dai <wei.dai@intel.com>
To: wenzhuo.lu@intel.com
Cc: dev@dpdk.org, Wei Dai <wei.dai@intel.com>
Subject: [dpdk-dev] [PATCH 1/2] net/e1000: convert to new Rx offloads API
Date: Fri,  2 Mar 2018 02:54:30 +0800
Message-ID: <20180301185431.26204-2-wei.dai@intel.com>
In-Reply-To: <20180301185431.26204-1-wei.dai@intel.com>

The ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.
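
For reference, applications move from the old rxmode bit-fields
(hw_vlan_strip, hw_strip_crc, hw_ip_checksum, jumbo_frame,
enable_scatter, ...) to the offloads bitmasks. A minimal sketch of the
new usage (port_id, nb_rx_desc, socket_id and mb_pool are hypothetical
and assumed to be set up elsewhere):

	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Port-level offloads replace the old rxmode bit-fields. */
	port_conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
				    DEV_RX_OFFLOAD_CRC_STRIP;
	port_conf.rxmode.ignore_offload_bitfield = 1;
	rte_eth_dev_configure(port_id, 1, 1, &port_conf);

	/* Queue-level offloads repeat the port-level ones; only flags
	 * advertised in dev_info.rx_queue_offload_capa may differ
	 * between queues. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;
	rte_eth_rx_queue_setup(port_id, 0, nb_rx_desc, socket_id,
			       &rxq_conf, mb_pool);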

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/e1000/em_ethdev.c  | 32 +++++++++++++++-----
 drivers/net/e1000/em_rxtx.c    | 27 ++++++++++-------
 drivers/net/e1000/igb_ethdev.c | 53 +++++++++++++++++++++------------
 drivers/net/e1000/igb_rxtx.c   | 66 +++++++++++++++++++++++++++++++++---------
 4 files changed, 127 insertions(+), 51 deletions(-)
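
Note: the igb path below adds igb_check_rx_queue_offloads(), which
accepts a queue-level request iff it agrees with the port-level
configuration on every flag that cannot be toggled per queue. The rule,
sketched with the names used in the patch:

	uint64_t port_only = dev_info.rx_offload_capa ^
			     dev_info.rx_queue_offload_capa;
	/* ok iff requested and port-level offloads differ only in
	 * flags that each queue may set independently */
	int ok = !((rxmode.offloads ^ rx_conf->offloads) & port_only);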

diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 242375f..acd0d22 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1105,15 +1105,26 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		DEV_RX_OFFLOAD_VLAN_STRIP  |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		DEV_RX_OFFLOAD_UDP_CKSUM   |
+		DEV_RX_OFFLOAD_TCP_CKSUM   |
+		DEV_RX_OFFLOAD_CRC_STRIP   |
+		DEV_RX_OFFLOAD_SCATTER;
+	if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
 		DEV_TX_OFFLOAD_TCP_CKSUM;
+	/*
+	 * As only one Rx/Tx queue can be used, let per queue offloading
+	 * capability be same to per port queue offloading capability
+	 * for better compatibility.
+	 */
+	dev_info->rx_queue_offload_capa = dev_info->rx_offload_capa;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1460,15 +1471,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1835,10 +1849,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae10..9b328b1 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -1382,8 +1383,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-				0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1396,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
+	rxq->offloads = rx_conf->offloads;
 
 	return 0;
 }
@@ -1646,6 +1648,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw;
 	struct em_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rctl;
 	uint32_t rfctl;
 	uint32_t rxcsum;
@@ -1654,6 +1657,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	int ret;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	rxmode = &dev->data->dev_conf.rxmode;
 
 	/*
 	 * Make sure receives are disabled while setting
@@ -1714,8 +1718,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 *  call to configure
 		 */
 		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+			(uint8_t)(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1749,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
-		if (dev->data->dev_conf.rxmode.jumbo_frame ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
 				rctl_bsize < ETHER_MAX_LEN) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1759,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1772,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,21 +1784,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	if ((hw->mac.type == e1000_ich9lan ||
 			hw->mac.type == e1000_pch2lan ||
 			hw->mac.type == e1000_ich10lan) &&
-			dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+			rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
 		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
 	}
 
 	if (hw->mac.type == e1000_pch2lan) {
-		if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
 		else
 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 	else
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
@@ -1814,7 +1818,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		rctl |= E1000_RCTL_LPE;
 	else
 		rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1898,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_rx_desc;
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 3c5138d..7c47171 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2200,11 +2200,16 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		DEV_RX_OFFLOAD_VLAN_STRIP  |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		DEV_RX_OFFLOAD_UDP_CKSUM   |
+		DEV_RX_OFFLOAD_TCP_CKSUM   |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_CRC_STRIP   |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -2274,6 +2279,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2330,9 +2336,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
+				DEV_RX_OFFLOAD_IPV4_CKSUM  |
+				DEV_RX_OFFLOAD_UDP_CKSUM   |
+				DEV_RX_OFFLOAD_TCP_CKSUM   |
+				DEV_RX_OFFLOAD_JUMBO_FRAME |
+				DEV_RX_OFFLOAD_CRC_STRIP   |
+				DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -2343,10 +2352,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
 		dev_info->max_tx_queues = 2;
+		dev_info->rx_queue_offload_capa = 0;
 		break;
 	case e1000_vfadapt_i350:
 		dev_info->max_rx_queues = 1;
 		dev_info->max_tx_queues = 1;
+		dev_info->rx_queue_offload_capa = dev_info->rx_offload_capa;
 		break;
 	default:
 		/* Should not happen */
@@ -2361,6 +2372,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2704,7 +2716,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						VLAN_TAG_SIZE);
@@ -2723,7 +2735,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						2 * VLAN_TAG_SIZE);
@@ -2732,22 +2744,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static int
 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -3250,14 +3265,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -4499,10 +4514,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 2f37167..9c33fda 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -107,6 +107,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
+	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
 };
 
 /**
@@ -1593,6 +1594,19 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+static int
+igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	struct rte_eth_dev_info dev_info;
+	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+	uint64_t supported; /* Offloads that cannot differ per queue */
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+
+	return !((mandatory ^ requested) & supported);
+}
+
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1606,6 +1620,18 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	unsigned int size;
 
+	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		struct rte_eth_dev_info dev_info;
+		dev->dev_ops->dev_infos_get(dev, &dev_info);
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
@@ -1630,6 +1656,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
+	rxq->offloads = rx_conf->offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1671,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				  ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 
 	/*
 	 *  Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2254,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 int
 eth_igb_rx_init(struct rte_eth_dev *dev)
 {
+	struct rte_eth_rxmode *rxmode;
 	struct e1000_hw     *hw;
 	struct igb_rx_queue *rxq;
 	uint32_t rctl;
@@ -2247,10 +2275,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
 
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= E1000_RCTL_LPE;
 
 		/*
@@ -2292,9 +2322,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+		rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2391,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,16 +2435,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
-		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+		rxcsum &= ~E1000_RXCSUM_IPOFL;
+	if (rxmode->offloads &
+		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
 		/* set STRCRC bit in all queues */
@@ -2654,7 +2691,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2778,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
-- 
2.9.4


Thread overview: 8+ messages
2018-03-01 18:54 [dpdk-dev] [PATCH 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
2018-03-01 18:54 ` Wei Dai [this message]
2018-03-01 18:54 ` [dpdk-dev] [PATCH 2/2] net/e1000: convert to new Tx " Wei Dai
2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 1/2] net/e1000: convert to new Rx " Wei Dai
2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 2/2] net/e1000: convert to new Tx " Wei Dai
2018-04-03 13:41   ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Zhang, Qi Z
2018-04-03 15:15     ` Zhang, Helin
