DPDK patches and discussions
* [dpdk-dev] [PATCH 0/2] net/e1000: convert to new Rx/Tx offloads API
@ 2018-03-01 18:54 Wei Dai
  2018-03-01 18:54 ` [dpdk-dev] [PATCH 1/2] net/e1000: convert to new Rx " Wei Dai
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Wei Dai @ 2018-03-01 18:54 UTC (permalink / raw)
  To: wenzhuo.lu; +Cc: dev, Wei Dai

This patch set converts net/e1000 to the new Rx/Tx offloads API.
All Rx offloads are per-port features.
All Tx offloads of e1000 are per-queue and also per-packet, as they
are enabled in the Tx descriptor.
In the new offloads API, per-queue offloads only need to be set in
queue_setup(). So if the maximum number of queues in the Rx or Tx
path is one, all offloads in that path are reported as per-queue for
convenience.

Wei Dai (2):
  net/e1000: convert to new Rx offloads API
  net/e1000: convert to new Tx offloads API

 drivers/net/e1000/em_ethdev.c  | 33 +++++++++++++++-----
 drivers/net/e1000/em_rxtx.c    | 30 +++++++++++-------
 drivers/net/e1000/igb_ethdev.c | 57 +++++++++++++++++++++++-----------
 drivers/net/e1000/igb_rxtx.c   | 69 +++++++++++++++++++++++++++++++++---------
 4 files changed, 138 insertions(+), 51 deletions(-)

-- 
2.9.4


* [dpdk-dev] [PATCH 1/2] net/e1000: convert to new Rx offloads API
  2018-03-01 18:54 [dpdk-dev] [PATCH 0/2] net/e1000: convert to new Rx/Tx offloads API Wei Dai
@ 2018-03-01 18:54 ` Wei Dai
  2018-03-01 18:54 ` [dpdk-dev] [PATCH 2/2] net/e1000: convert to new Tx " Wei Dai
  2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
  2 siblings, 0 replies; 8+ messages in thread
From: Wei Dai @ 2018-03-01 18:54 UTC (permalink / raw)
  To: wenzhuo.lu; +Cc: dev, Wei Dai

Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
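
The conversion is mostly mechanical: each per-field rxmode flag
(hw_vlan_strip, hw_vlan_filter, hw_strip_crc, jumbo_frame, ...) becomes
a bit test on rxmode.offloads.  A simplified sketch of the pattern, not
the exact driver code in the diff below:

/* Old rxmode bit-fields replaced by DEV_RX_OFFLOAD_* tests; trimmed
 * down from eth_em_vlan_offload_set() for illustration. */
static int
vlan_offload_set_sketch(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* old API: if (rxmode->hw_vlan_strip) */
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			em_vlan_hw_strip_enable(dev);
		else
			em_vlan_hw_strip_disable(dev);
	}

	return 0;
}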

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/e1000/em_ethdev.c  | 32 +++++++++++++++-----
 drivers/net/e1000/em_rxtx.c    | 27 ++++++++++-------
 drivers/net/e1000/igb_ethdev.c | 53 +++++++++++++++++++++------------
 drivers/net/e1000/igb_rxtx.c   | 66 +++++++++++++++++++++++++++++++++---------
 4 files changed, 127 insertions(+), 51 deletions(-)

diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 242375f..acd0d22 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1105,15 +1105,26 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		DEV_RX_OFFLOAD_VLAN_STRIP  |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		DEV_RX_OFFLOAD_UDP_CKSUM   |
+		DEV_RX_OFFLOAD_TCP_CKSUM   |
+		DEV_RX_OFFLOAD_CRC_STRIP   |
+		DEV_RX_OFFLOAD_SCATTER;
+	if (dev_info->max_rx_pktlen > ETHER_MAX_LEN)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
 		DEV_TX_OFFLOAD_TCP_CKSUM;
+	/*
+	 * As only one Rx/Tx queue can be used, let per queue offloading
+	 * capability be same to per port queue offloading capability
+	 * for better compatibility.
+	 */
+	dev_info->rx_queue_offload_capa = dev_info->rx_offload_capa;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1460,15 +1471,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1835,10 +1849,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae10..9b328b1 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -1382,8 +1383,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-				0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1396,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
+	rxq->offloads = rx_conf->offloads;
 
 	return 0;
 }
@@ -1646,6 +1648,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw;
 	struct em_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rctl;
 	uint32_t rfctl;
 	uint32_t rxcsum;
@@ -1654,6 +1657,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	int ret;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	rxmode = &dev->data->dev_conf.rxmode;
 
 	/*
 	 * Make sure receives are disabled while setting
@@ -1714,8 +1718,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 *  call to configure
 		 */
 		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+			(uint8_t)(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1749,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
-		if (dev->data->dev_conf.rxmode.jumbo_frame ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
 				rctl_bsize < ETHER_MAX_LEN) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1759,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1772,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,21 +1784,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	if ((hw->mac.type == e1000_ich9lan ||
 			hw->mac.type == e1000_pch2lan ||
 			hw->mac.type == e1000_ich10lan) &&
-			dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+			rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
 		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
 	}
 
 	if (hw->mac.type == e1000_pch2lan) {
-		if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
 		else
 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 	else
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
@@ -1814,7 +1818,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		rctl |= E1000_RCTL_LPE;
 	else
 		rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1898,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_rx_desc;
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 3c5138d..7c47171 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2200,11 +2200,16 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+	dev_info->rx_queue_offload_capa = 0;
 	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+		DEV_RX_OFFLOAD_VLAN_STRIP  |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		DEV_RX_OFFLOAD_UDP_CKSUM   |
+		DEV_RX_OFFLOAD_TCP_CKSUM   |
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_CRC_STRIP   |
+		DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -2274,6 +2279,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2330,9 +2336,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
+				DEV_RX_OFFLOAD_IPV4_CKSUM  |
+				DEV_RX_OFFLOAD_UDP_CKSUM   |
+				DEV_RX_OFFLOAD_TCP_CKSUM   |
+				DEV_RX_OFFLOAD_JUMBO_FRAME |
+				DEV_RX_OFFLOAD_CRC_STRIP   |
+				DEV_RX_OFFLOAD_SCATTER;
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -2343,10 +2352,12 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
 		dev_info->max_tx_queues = 2;
+		dev_info->rx_queue_offload_capa = 0;
 		break;
 	case e1000_vfadapt_i350:
 		dev_info->max_rx_queues = 1;
 		dev_info->max_tx_queues = 1;
+		dev_info->rx_queue_offload_capa = dev_info->rx_offload_capa;
 		break;
 	default:
 		/* Should not happen */
@@ -2361,6 +2372,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2704,7 +2716,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						VLAN_TAG_SIZE);
@@ -2723,7 +2735,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						2 * VLAN_TAG_SIZE);
@@ -2732,22 +2744,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static int
 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -3250,14 +3265,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -4499,10 +4514,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 2f37167..9c33fda 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -107,6 +107,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
+	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
 };
 
 /**
@@ -1593,6 +1594,19 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+static int
+igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	struct rte_eth_dev_info dev_info;
+	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+	uint64_t supported; /* All per port offloads */
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+
+	return !((mandatory ^ requested) & supported);
+}
+
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1606,6 +1620,18 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	unsigned int size;
 
+	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		struct rte_eth_dev_info dev_info;
+		dev->dev_ops->dev_infos_get(dev, &dev_info);
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
@@ -1630,6 +1656,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
+	rxq->offloads = rx_conf->offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1671,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				  ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 
 	/*
 	 *  Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2254,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 int
 eth_igb_rx_init(struct rte_eth_dev *dev)
 {
+	struct rte_eth_rxmode *rxmode;
 	struct e1000_hw     *hw;
 	struct igb_rx_queue *rxq;
 	uint32_t rctl;
@@ -2247,10 +2275,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
 
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= E1000_RCTL_LPE;
 
 		/*
@@ -2292,9 +2322,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+		rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2391,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,16 +2435,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
-		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+		rxcsum &= ~E1000_RXCSUM_IPOFL;
+	if (rxmode->offloads &
+		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
 		/* set STRCRC bit in all queues */
@@ -2654,7 +2691,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2778,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
-- 
2.9.4


* [dpdk-dev] [PATCH 2/2] net/e1000: convert to new Tx offloads API
  2018-03-01 18:54 [dpdk-dev] [PATCH 0/2] net/e1000: convert to new Rx/Tx offloads API Wei Dai
  2018-03-01 18:54 ` [dpdk-dev] [PATCH 1/2] net/e1000: convert to new Rx " Wei Dai
@ 2018-03-01 18:54 ` Wei Dai
  2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
  2 siblings, 0 replies; 8+ messages in thread
From: Wei Dai @ 2018-03-01 18:54 UTC (permalink / raw)
  To: wenzhuo.lu; +Cc: dev, Wei Dai

Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
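
Because all e1000 Tx offloads are per queue, an application requests them
through rte_eth_txconf.offloads at queue-setup time.  A hypothetical
sketch (descriptor count and queue id are placeholders;
ETH_TXQ_FLAGS_IGNORE is the transitional flag of this ethdev generation
telling the PMD to honour the offloads field):

#include <rte_ethdev.h>

static int
setup_tx_queue(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txq_conf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;

	/* Per-queue Tx offloads, limited to what the port advertises. */
	txq_conf.offloads = (DEV_TX_OFFLOAD_VLAN_INSERT |
			     DEV_TX_OFFLOAD_IPV4_CKSUM  |
			     DEV_TX_OFFLOAD_TCP_CKSUM) &
			    dev_info.tx_offload_capa;

	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &txq_conf);
}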

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/e1000/em_ethdev.c  | 1 +
 drivers/net/e1000/em_rxtx.c    | 3 +++
 drivers/net/e1000/igb_ethdev.c | 4 ++++
 drivers/net/e1000/igb_rxtx.c   | 3 +++
 4 files changed, 11 insertions(+)

diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index acd0d22..a9439c2 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1125,6 +1125,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	 * for better compatibility.
 	 */
 	dev_info->rx_queue_offload_capa = dev_info->rx_offload_capa;
+	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 9b328b1..6039c97 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -164,6 +164,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1270,6 +1271,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 	return 0;
 }
 
@@ -1916,4 +1918,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.offloads = txq->offloads;
 }
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 7c47171..9396502 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2217,6 +2217,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_TCP_CKSUM   |
 		DEV_TX_OFFLOAD_SCTP_CKSUM  |
 		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2289,6 +2290,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -2348,6 +2350,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 				DEV_TX_OFFLOAD_TCP_CKSUM   |
 				DEV_TX_OFFLOAD_SCTP_CKSUM  |
 				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 	switch (hw->mac.type) {
 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
@@ -2382,6 +2385,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 9c33fda..0fcd9c4 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -181,6 +181,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1543,6 +1544,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 
 	return 0;
 }
@@ -2794,6 +2796,7 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.offloads = txq->offloads;
 }
 
 int
-- 
2.9.4


* [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx offloads API
  2018-03-01 18:54 [dpdk-dev] [PATCH 0/2] net/e1000: convert to new Rx/Tx offloads API Wei Dai
  2018-03-01 18:54 ` [dpdk-dev] [PATCH 1/2] net/e1000: convert to new Rx " Wei Dai
  2018-03-01 18:54 ` [dpdk-dev] [PATCH 2/2] net/e1000: convert to new Tx " Wei Dai
@ 2018-04-03  2:54 ` Wei Dai
  2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 1/2] net/e1000: convert to new Rx " Wei Dai
                     ` (2 more replies)
  2 siblings, 3 replies; 8+ messages in thread
From: Wei Dai @ 2018-04-03  2:54 UTC (permalink / raw)
  To: wenzhuo.lu, qi.z.zhang; +Cc: dev, Wei Dai

This patch set converts net/e1000 to the new Rx/Tx offloads API.
All Rx offloads are per-port features.
All Tx offloads of e1000 are per-queue and also per-packet, as they
are enabled in the Tx descriptor.
In the new offloads API, per-queue offloads only need to be set in
queue_setup(). So if the maximum number of queues in the Rx or Tx
path is one, all offloads in that path are reported as per-queue for
convenience.

---
v2: add offloads checking
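
The checks added in v2 all follow the same rule: a queue-setup request
must be a subset of (queue capabilities | port capabilities), and the
bits that are port-wide only must match what was requested at configure
time.  A standalone restatement of that rule (illustrative helper, not
one of the driver functions):

#include <stdint.h>

/* Returns 1 when a queue-setup offload request is acceptable. */
static int
queue_offloads_ok(uint64_t requested,      /* rx/tx_conf->offloads */
		  uint64_t port_requested, /* rxmode/txmode.offloads */
		  uint64_t queue_capa,     /* per-queue capabilities */
		  uint64_t port_capa)      /* port-only capabilities */
{
	/* Everything requested must be supported somewhere. */
	if ((requested & (queue_capa | port_capa)) != requested)
		return 0;

	/* Port-wide offloads cannot be toggled per queue: those bits
	 * must match the port-level request exactly. */
	if ((port_requested ^ requested) & port_capa)
		return 0;

	return 1;
}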


Wei Dai (2):
  net/e1000: convert to new Rx offloads API
  net/e1000: convert to new Tx offloads API

 drivers/net/e1000/e1000_ethdev.h |  14 ++++
 drivers/net/e1000/em_ethdev.c    |  58 ++++++++++----
 drivers/net/e1000/em_rxtx.c      | 155 +++++++++++++++++++++++++++++++++---
 drivers/net/e1000/igb_ethdev.c   |  60 +++++++-------
 drivers/net/e1000/igb_rxtx.c     | 167 +++++++++++++++++++++++++++++++++++----
 5 files changed, 385 insertions(+), 69 deletions(-)

-- 
2.9.5


* [dpdk-dev] [PATCH v2 1/2] net/e1000: convert to new Rx offloads API
  2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
@ 2018-04-03  2:54   ` Wei Dai
  2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 2/2] net/e1000: convert to new Tx " Wei Dai
  2018-04-03 13:41   ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Zhang, Qi Z
  2 siblings, 0 replies; 8+ messages in thread
From: Wei Dai @ 2018-04-03  2:54 UTC (permalink / raw)
  To: wenzhuo.lu, qi.z.zhang; +Cc: dev, Wei Dai

Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
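
With the capabilities now split into port-level and queue-level sets, an
application can tell which Rx offloads may differ per queue and which are
port-wide only.  A small hypothetical helper:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_rx_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t port_only;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Bits advertised for the port but not per queue can only be
	 * chosen through rte_eth_dev_configure(), never per queue. */
	port_only = dev_info.rx_offload_capa &
		    ~dev_info.rx_queue_offload_capa;

	printf("per-queue Rx offload capa: 0x%" PRIx64 "\n",
	       dev_info.rx_queue_offload_capa);
	printf("port-only Rx offload capa: 0x%" PRIx64 "\n", port_only);
}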

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/e1000/e1000_ethdev.h |   8 +++
 drivers/net/e1000/em_ethdev.c    |  42 ++++++++++-----
 drivers/net/e1000/em_rxtx.c      |  93 +++++++++++++++++++++++++++++----
 drivers/net/e1000/igb_ethdev.c   |  45 +++++++++-------
 drivers/net/e1000/igb_rxtx.c     | 109 ++++++++++++++++++++++++++++++++++-----
 5 files changed, 240 insertions(+), 57 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 23b089c..17b5806 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -357,6 +357,9 @@ void eth_igb_rx_queue_release(void *rxq);
 void igb_dev_clear_queues(struct rte_eth_dev *dev);
 void igb_dev_free_queues(struct rte_eth_dev *dev);
 
+uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
@@ -417,6 +420,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo);
 
+uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -426,6 +431,9 @@ void eth_em_rx_queue_release(void *rxq);
 void em_dev_clear_queues(struct rte_eth_dev *dev);
 void em_dev_free_queues(struct rte_eth_dev *dev);
 
+uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 2035ea8..e2ec4b1 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -451,9 +451,21 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct rte_eth_dev_info dev_info;
+	uint64_t rx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+	eth_em_infos_get(dev, &dev_info);
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    rx_offloads, dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
@@ -1017,9 +1029,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
 	return 0;
 }
 
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
 {
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -1050,13 +1064,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
-	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -1083,6 +1092,10 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
 
+	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
 		.nb_min = E1000_MIN_RING_DESC,
@@ -1406,15 +1419,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1781,10 +1797,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae10..3291b5e 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -1313,6 +1314,59 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+uint64_t
+em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_offload_capa;
+	uint32_t max_rx_pktlen;
+
+	max_rx_pktlen = em_get_max_pktlen(dev);
+
+	rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP  |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		DEV_RX_OFFLOAD_UDP_CKSUM   |
+		DEV_RX_OFFLOAD_TCP_CKSUM   |
+		DEV_RX_OFFLOAD_CRC_STRIP   |
+		DEV_RX_OFFLOAD_SCATTER;
+	if (max_rx_pktlen > ETHER_MAX_LEN)
+		rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return rx_offload_capa;
+}
+
+uint64_t
+em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_queue_offload_capa;
+
+	/*
+	 * As only one Rx queue can be used, let per queue offloading
+	 * capability be same to per port queue offloading capability
+	 * for better convenience.
+	 */
+	rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+
+	return rx_queue_offload_capa;
+}
+
+static int
+em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
+	uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1328,6 +1382,19 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			em_get_rx_port_offloads_capa(dev),
+			em_get_rx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -1382,8 +1449,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-				0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1462,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
+	rxq->offloads = rx_conf->offloads;
 
 	return 0;
 }
@@ -1646,6 +1714,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw;
 	struct em_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rctl;
 	uint32_t rfctl;
 	uint32_t rxcsum;
@@ -1654,6 +1723,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	int ret;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	rxmode = &dev->data->dev_conf.rxmode;
 
 	/*
 	 * Make sure receives are disabled while setting
@@ -1714,8 +1784,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 *  call to configure
 		 */
 		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+			(uint8_t)(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1815,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
-		if (dev->data->dev_conf.rxmode.jumbo_frame ||
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
 				rctl_bsize < ETHER_MAX_LEN) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1825,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1838,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,21 +1850,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	if ((hw->mac.type == e1000_ich9lan ||
 			hw->mac.type == e1000_pch2lan ||
 			hw->mac.type == e1000_ich10lan) &&
-			dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+			rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
 		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
 	}
 
 	if (hw->mac.type == e1000_pch2lan) {
-		if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
 		else
 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 	else
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
@@ -1814,7 +1884,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		rctl |= E1000_RCTL_LPE;
 	else
 		rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1964,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_rx_desc;
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index d7eef9a..97c7089 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2148,11 +2148,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -2222,6 +2220,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2277,10 +2276,6 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
@@ -2301,6 +2296,10 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		break;
 	}
 
+	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IGB_DEFAULT_RX_PTHRESH,
@@ -2309,6 +2308,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2644,7 +2644,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						VLAN_TAG_SIZE);
@@ -2663,7 +2663,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						2 * VLAN_TAG_SIZE);
@@ -2672,22 +2672,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static int
 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -3189,14 +3192,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -4438,10 +4441,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 009f0ea..450beea 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -107,6 +107,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
+	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
 };
 
 /**
@@ -1593,6 +1594,61 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+uint64_t
+igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_offload_capa;
+
+	RTE_SET_USED(dev);
+	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
+			  DEV_RX_OFFLOAD_VLAN_FILTER |
+			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_RX_OFFLOAD_UDP_CKSUM   |
+			  DEV_RX_OFFLOAD_TCP_CKSUM   |
+			  DEV_RX_OFFLOAD_JUMBO_FRAME |
+			  DEV_RX_OFFLOAD_CRC_STRIP   |
+			  DEV_RX_OFFLOAD_SCATTER;
+
+	return rx_offload_capa;
+}
+
+uint64_t
+igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t rx_queue_offload_capa;
+
+	switch (hw->mac.type) {
+	case e1000_vfadapt_i350:
+		/*
+		 * As only one Rx queue can be used, let per queue offloading
+		 * capability be same to per port queue offloading capability
+		 * for better convenience.
+		 */
+		rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
+		break;
+	default:
+		rx_queue_offload_capa = 0;
+	}
+	return rx_queue_offload_capa;
+}
+
+static int
+igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+	uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
+	uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1606,6 +1662,19 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	unsigned int size;
 
+	if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			igb_get_rx_port_offloads_capa(dev),
+			igb_get_rx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
@@ -1630,6 +1699,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
+	rxq->offloads = rx_conf->offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1714,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				  ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 
 	/*
 	 *  Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2297,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 int
 eth_igb_rx_init(struct rte_eth_dev *dev)
 {
+	struct rte_eth_rxmode *rxmode;
 	struct e1000_hw     *hw;
 	struct igb_rx_queue *rxq;
 	uint32_t rctl;
@@ -2247,10 +2318,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
 
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	/*
 	 * Configure support of jumbo frames, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= E1000_RCTL_LPE;
 
 		/*
@@ -2292,9 +2365,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 *  call to configure
 		 */
-		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+		rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2434,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,16 +2478,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
-		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+		rxcsum &= ~E1000_RXCSUM_IPOFL;
+	if (rxmode->offloads &
+		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
 		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
 		/* set STRCRC bit in all queues */
@@ -2654,7 +2734,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2821,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
-- 
2.9.5


* [dpdk-dev] [PATCH v2 2/2] net/e1000: convert to new Tx offloads API
  2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
  2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 1/2] net/e1000: convert to new Rx " Wei Dai
@ 2018-04-03  2:54   ` Wei Dai
  2018-04-03 13:41   ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Zhang, Qi Z
  2 siblings, 0 replies; 8+ messages in thread
From: Wei Dai @ 2018-04-03  2:54 UTC (permalink / raw)
  To: wenzhuo.lu, qi.z.zhang; +Cc: dev, Wei Dai

Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
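
eth_em_configure() now rejects port-level Tx offloads the device cannot
do; an application can run the same sanity check before configuring.
A small sketch (hypothetical helper, not part of the patch):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static int
tx_offloads_supported(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if ((wanted & dev_info.tx_offload_capa) != wanted) {
		printf("unsupported Tx offloads: 0x%" PRIx64 "\n",
		       wanted & ~dev_info.tx_offload_capa);
		return 0;
	}
	return 1;
}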

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/e1000/e1000_ethdev.h |  6 ++++
 drivers/net/e1000/em_ethdev.c    | 16 +++++++----
 drivers/net/e1000/em_rxtx.c      | 62 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/e1000/igb_ethdev.c   | 15 +++++-----
 drivers/net/e1000/igb_rxtx.c     | 58 +++++++++++++++++++++++++++++++++++++
 5 files changed, 145 insertions(+), 12 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 17b5806..6354b89 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -373,6 +373,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
@@ -447,6 +450,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index e2ec4b1..d37df8a 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -453,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
@@ -465,6 +466,13 @@ eth_em_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1066,11 +1074,6 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1095,6 +1098,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3291b5e..2b3c63e 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -164,6 +164,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1152,6 +1153,52 @@ em_reset_tx_queue(struct em_tx_queue *txq)
 	memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
 }
 
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	/*
+	 * As only one Tx queue can be used, let the per-queue offloading
+	 * capability be the same as the per-port offloading capability
+	 * for convenience.
+	 */
+	tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
+static int
+em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
+	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1167,6 +1214,19 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			em_get_tx_port_offloads_capa(dev),
+			em_get_tx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -1270,6 +1330,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 	return 0;
 }
 
@@ -1982,4 +2043,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.offloads = txq->offloads;
 }
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 97c7089..8d42266 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2151,13 +2151,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2230,6 +2226,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -2299,6 +2296,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -2318,6 +2318,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 450beea..323913f 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -181,6 +181,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1448,6 +1449,48 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 	igb_reset_tx_queue_stat(txq);
 }
 
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_TX_OFFLOAD_UDP_CKSUM   |
+			  DEV_TX_OFFLOAD_TCP_CKSUM   |
+			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
+			  DEV_TX_OFFLOAD_TCP_TSO;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
+static int
+igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
+	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1460,6 +1503,19 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t size;
 
+	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			igb_get_tx_port_offloads_capa(dev),
+			igb_get_tx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
@@ -1543,6 +1599,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 
 	return 0;
 }
@@ -2837,6 +2894,7 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.offloads = txq->offloads;
 }
 
 int
-- 
2.9.5

^ permalink raw reply	[flat|nested] 8+ messages in thread
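
The offload check added above in em_check_tx_queue_offloads() and igb_check_tx_queue_offloads() encodes two rules of the new API: a queue may only request offloads the PMD advertises, and any offload that is a port-level capability must be requested identically at queue setup and in the port configuration. Since e1000 reports every Tx offload at both levels, the queue request effectively has to mirror txmode.offloads. A minimal standalone sketch of that rule follows; the EX_* flag values and the main() harness are illustrative, not taken from the patch or from rte_ethdev.h:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the DEV_TX_OFFLOAD_* flags in rte_ethdev.h;
 * the values here are only for this example.
 */
#define EX_TX_OFFLOAD_VLAN_INSERT 0x01ULL
#define EX_TX_OFFLOAD_IPV4_CKSUM  0x02ULL
#define EX_TX_OFFLOAD_TCP_CKSUM   0x08ULL

/*
 * Same two conditions as em/igb_check_tx_queue_offloads():
 * 1. every requested offload must be within the union of queue-level and
 *    port-level capabilities;
 * 2. on the port-level capabilities the queue request must agree bit for
 *    bit with what was set in txmode.offloads at configure time.
 */
static int
check_tx_queue_offloads(uint64_t requested, uint64_t port_offloads,
			uint64_t queue_supported, uint64_t port_supported)
{
	if ((requested & (queue_supported | port_supported)) != requested)
		return 0;
	if ((port_offloads ^ requested) & port_supported)
		return 0;
	return 1;
}

int
main(void)
{
	/* e1000 reports the same capabilities per port and per queue. */
	uint64_t capa = EX_TX_OFFLOAD_VLAN_INSERT |
			EX_TX_OFFLOAD_IPV4_CKSUM |
			EX_TX_OFFLOAD_TCP_CKSUM;
	uint64_t port_offloads = EX_TX_OFFLOAD_IPV4_CKSUM;

	/* Accepted: queue request mirrors the port configuration. */
	printf("%d\n", check_tx_queue_offloads(port_offloads, port_offloads,
					       capa, capa));
	/* Rejected: TCP checksum differs from the port configuration. */
	printf("%d\n", check_tx_queue_offloads(port_offloads |
					       EX_TX_OFFLOAD_TCP_CKSUM,
					       port_offloads, capa, capa));
	return 0;
}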

* Re: [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx offloads API
  2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
  2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 1/2] net/e1000: convert to new Rx " Wei Dai
  2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 2/2] net/e1000: convert to new Tx " Wei Dai
@ 2018-04-03 13:41   ` Zhang, Qi Z
  2018-04-03 15:15     ` Zhang, Helin
  2 siblings, 1 reply; 8+ messages in thread
From: Zhang, Qi Z @ 2018-04-03 13:41 UTC (permalink / raw)
  To: Dai, Wei, Lu, Wenzhuo; +Cc: dev



> -----Original Message-----
> From: Dai, Wei
> Sent: Tuesday, April 3, 2018 10:55 AM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> Subject: [PATCH v2 0/2] net/e1000: convert to new Rx/Tx offloads API
> 
> This patch set converts net/e1000 to the new Rx/Tx offloads API.
> All Rx offloads are per-port features.
> All Tx offloads of e1000 are per queue and also per packet, as they are
> enabled in the Tx descriptor.
> In the new offload API, per-queue offloads only need to be set in
> queue_setup(). So when the maximum number of queues in the Rx or Tx
> path is one, all offloads in that path are made per queue for convenience.
> 
> ---
> v2: add offloads checking
> 
> 
> Wei Dai (2):
>   net/e1000: convert to new Rx offloads API
>   net/e1000: convert to new Tx offloads API
> 
>  drivers/net/e1000/e1000_ethdev.h |  14 ++++
>  drivers/net/e1000/em_ethdev.c    |  58 ++++++++++----
>  drivers/net/e1000/em_rxtx.c      | 155
> +++++++++++++++++++++++++++++++++---
>  drivers/net/e1000/igb_ethdev.c   |  60 +++++++-------
>  drivers/net/e1000/igb_rxtx.c     | 167
> +++++++++++++++++++++++++++++++++++----
>  5 files changed, 385 insertions(+), 69 deletions(-)
> 
> --
> 2.9.5

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

^ permalink raw reply	[flat|nested] 8+ messages in thread
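
As the quoted cover letter explains, an application using the converted driver sets port-wide Tx offloads in dev_conf.txmode.offloads and passes per-queue offloads through rte_eth_tx_queue_setup(); because e1000 advertises identical port- and queue-level Tx capabilities, the queue request simply repeats the port setting. A rough single-queue sketch against the 18.05-era ethdev API follows; the port id, descriptor count, chosen offload flags and the trimmed error handling are illustrative only:

#include <rte_ethdev.h>

/*
 * Sketch only: configure one Tx queue on an e1000 port with checksum
 * offloads under the new API.  Assumes rte_eal_init() has already run
 * and that port_id refers to an e1000 device.
 */
static int
setup_e1000_tx(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_txconf txconf;
	uint64_t want = DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request only what the PMD advertises at port level. */
	port_conf.txmode.offloads = want & dev_info.tx_offload_capa;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/*
	 * Per-queue offloads: e1000 reports every Tx offload at both port
	 * and queue level, so the queue request mirrors the port setting.
	 */
	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* take offloads, not txq_flags */
	txconf.offloads = port_conf.txmode.offloads;

	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id), &txconf);
}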

* Re: [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx offloads API
  2018-04-03 13:41   ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Zhang, Qi Z
@ 2018-04-03 15:15     ` Zhang, Helin
  0 siblings, 0 replies; 8+ messages in thread
From: Zhang, Helin @ 2018-04-03 15:15 UTC (permalink / raw)
  To: Zhang, Qi Z, Dai, Wei, Lu, Wenzhuo; +Cc: dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> Sent: Tuesday, April 3, 2018 9:42 PM
> To: Dai, Wei; Lu, Wenzhuo
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx
> offloads API
> 
> 
> 
> > -----Original Message-----
> > From: Dai, Wei
> > Sent: Tuesday, April 3, 2018 10:55 AM
> > To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: dev@dpdk.org; Dai, Wei <wei.dai@intel.com>
> > Subject: [PATCH v2 0/2] net/e1000: convert to new Rx/Tx offloads API
> >
> > This patch set converts net/e1000 to the new Rx/Tx offloads API.
> > All Rx offloads are per-port features.
> > All Tx offloads of e1000 are per queue and also per packet, as they are
> > enabled in the Tx descriptor.
> > In the new offload API, per-queue offloads only need to be set in
> > queue_setup(). So when the maximum number of queues in the Rx or Tx
> > path is one, all offloads in that path are made per queue for convenience.
> >
> > ---
> > v2: add offloads checking
> >
> >
> > Wei Dai (2):
> >   net/e1000: convert to new Rx offloads API
> >   net/e1000: convert to new Tx offloads API
> >
> >  drivers/net/e1000/e1000_ethdev.h |  14 ++++
> >  drivers/net/e1000/em_ethdev.c    |  58 ++++++++++----
> >  drivers/net/e1000/em_rxtx.c      | 155
> > +++++++++++++++++++++++++++++++++---
> >  drivers/net/e1000/igb_ethdev.c   |  60 +++++++-------
> >  drivers/net/e1000/igb_rxtx.c     | 167
> > +++++++++++++++++++++++++++++++++++----
> >  5 files changed, 385 insertions(+), 69 deletions(-)
> >
> > --
> > 2.9.5
> 
> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Series applied to dpdk-next-net-intel, thanks!
/Helin

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2018-04-03 15:15 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-03-01 18:54 [dpdk-dev] [PATCH 0/2] net/e1000: convert to new Rx/Tx offloads API Wei Dai
2018-03-01 18:54 ` [dpdk-dev] [PATCH 1/2] net/e1000: convert to new Rx " Wei Dai
2018-03-01 18:54 ` [dpdk-dev] [PATCH 2/2] net/e1000: convert to new Tx " Wei Dai
2018-04-03  2:54 ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Wei Dai
2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 1/2] net/e1000: convert to new Rx " Wei Dai
2018-04-03  2:54   ` [dpdk-dev] [PATCH v2 2/2] net/e1000: convert to new Tx " Wei Dai
2018-04-03 13:41   ` [dpdk-dev] [PATCH v2 0/2] net/e1000: convert to new Rx/Tx " Zhang, Qi Z
2018-04-03 15:15     ` Zhang, Helin
