DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
@ 2018-01-03 13:12 maciej.czekaj
  2018-01-12 18:34 ` Ferruh Yigit
  2018-01-18 13:06 ` [dpdk-dev] [PATCH v2] " maciej.czekaj
  0 siblings, 2 replies; 9+ messages in thread
From: maciej.czekaj @ 2018-01-03 13:12 UTC (permalink / raw)
  To: dev; +Cc: shahafs, thomas, Maciej Czekaj

From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>

This patch removes all references to old-style offload API
replacing them with new offload flags.

Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
---
 drivers/net/thunderx/nicvf_ethdev.c | 143 +++++++++++++++++++++++-------------
 drivers/net/thunderx/nicvf_ethdev.h |  14 ++++
 drivers/net/thunderx/nicvf_struct.h |   2 +-
 3 files changed, 107 insertions(+), 52 deletions(-)

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index d65d3ce..3428ff2 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -179,6 +179,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 	size_t i;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -204,15 +205,15 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
 		return -EINVAL;
 
 	/* Update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+	rxmode->max_rx_pkt_len = (uint32_t)frame_size;
 	nic->mtu = mtu;
 
 	for (i = 0; i < nic->sqs_count; i++)
@@ -903,7 +904,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -942,9 +943,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			 const struct rte_eth_txconf *tx_conf)
 {
 	uint16_t tx_free_thresh;
-	uint8_t is_single_pool;
+	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
+	uint64_t conf_offloads, offload_capa, unsupported_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -958,6 +960,17 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
+	conf_offloads = tx_conf->offloads;
+	offload_capa = NICVF_TX_OFFLOAD_CAPA;
+
+	unsupported_offloads = conf_offloads & ~offload_capa;
+	if (unsupported_offloads) {
+		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
+		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+		      unsupported_offloads, conf_offloads, offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,11 +1020,11 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->nic = nic;
 	txq->queue_id = qidx;
 	txq->tx_free_thresh = tx_free_thresh;
-	txq->txq_flags = tx_conf->txq_flags;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
-				txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+	txq->offloads = conf_offloads;
+
+	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1042,9 +1055,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_tx_queue_reset(txq);
 
-	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
+			" phys=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
-			txq->phys);
+			txq->phys, txq->offloads);
 
 	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1270,6 +1284,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
+	uint64_t conf_offloads, offload_capa, unsupported_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1283,6 +1298,24 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
+
+	conf_offloads = rx_conf->offloads;
+
+	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
+	}
+
+	offload_capa = NICVF_RX_OFFLOAD_CAPA;
+	unsupported_offloads = conf_offloads & ~offload_capa;
+
+	if (unsupported_offloads) {
+		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
+		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+		      unsupported_offloads, conf_offloads, offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1363,9 +1396,10 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
-	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
+	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
+			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys);
+			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1399,13 +1433,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_vfs = pci_dev->max_vfs;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
 
 	dev_info->reta_size = nic->rss_info.rss_size;
 	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
@@ -1416,16 +1447,16 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
-		.txq_flags =
-			ETH_TXQ_FLAGS_NOMULTSEGS  |
-			ETH_TXQ_FLAGS_NOREFCOUNT  |
-			ETH_TXQ_FLAGS_NOMULTMEMP  |
-			ETH_TXQ_FLAGS_NOVLANOFFL  |
-			ETH_TXQ_FLAGS_NOXSUMSCTP,
+		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
+		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			DEV_TX_OFFLOAD_UDP_CKSUM          |
+			DEV_TX_OFFLOAD_TCP_CKSUM,
 	};
 }
 
@@ -1466,6 +1497,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
+	bool vlan_strip;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1585,7 +1617,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_STRIP);
+	nicvf_vlan_hw_strip(nic, vlan_strip);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1713,11 +1747,11 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if (rx_conf->enable_scatter)
+	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU based on max_rx_pkt_len or default */
-	mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
 		dev->data->dev_conf.rxmode.max_rx_pkt_len
 			-  ETHER_HDR_LEN - ETHER_CRC_LEN
 		: ETHER_MTU;
@@ -1891,6 +1925,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
+	uint64_t conf_rx_offloads, rx_offload_capa;
+	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1899,44 +1935,49 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode) {
-		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
-		return -EINVAL;
-	}
+	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
+	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
-		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
-		return -EINVAL;
+	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
+		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
+		      "requested 0x%lx supported 0x%lx\n",
+		      conf_tx_offloads, tx_offload_capa);
+		return -ENOTSUP;
 	}
 
-	if (!rxmode->hw_strip_crc) {
-		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
-		rxmode->hw_strip_crc = 1;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if (rxmode->hw_ip_checksum) {
-		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
-		rxmode->hw_ip_checksum = 0;
+	conf_rx_offloads = rxmode->offloads;
+	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+
+	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
+		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
+		      "requested 0x%lx supported 0x%lx\n",
+		      conf_rx_offloads, rx_offload_capa);
+		return -ENOTSUP;
 	}
 
-	if (rxmode->split_hdr_size) {
-		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
-		return -EINVAL;
+	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (rxmode->hw_vlan_filter) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
+	if (txmode->mq_mode) {
+		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
 		return -EINVAL;
 	}
 
-	if (rxmode->hw_vlan_extend) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
+	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (rxmode->enable_lro) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
+	if (rxmode->split_hdr_size) {
+		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 71bc3cf..7cd0689 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -57,6 +57,20 @@
 	ETH_RSS_GENEVE | \
 	ETH_RSS_NVGRE)
 
+#define NICVF_TX_OFFLOAD_CAPA ( \
+	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
+	DEV_TX_OFFLOAD_UDP_CKSUM        | \
+	DEV_TX_OFFLOAD_TCP_CKSUM        | \
+	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define NICVF_RX_OFFLOAD_CAPA ( \
+	DEV_RX_OFFLOAD_VLAN_STRIP  | \
+	DEV_RX_OFFLOAD_CRC_STRIP   | \
+	DEV_RX_OFFLOAD_JUMBO_FRAME | \
+	DEV_RX_OFFLOAD_SCATTER)
+
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
 #define NICVF_TX_FREE_MPOOL_THRESH      16
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 0f8208e..cf54524 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -67,7 +67,7 @@ struct nicvf_txq {
 	uint32_t tail;
 	int32_t xmit_bufs;
 	uint32_t qlen_mask;
-	uint32_t txq_flags;
+	uint64_t offloads;
 	uint16_t queue_id;
 	uint16_t tx_free_thresh;
 } __rte_cache_aligned;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-03 13:12 [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API maciej.czekaj
@ 2018-01-12 18:34 ` Ferruh Yigit
  2018-01-15 14:49   ` Maciej Czekaj
  2018-01-16 13:50   ` Shahaf Shuler
  2018-01-18 13:06 ` [dpdk-dev] [PATCH v2] " maciej.czekaj
  1 sibling, 2 replies; 9+ messages in thread
From: Ferruh Yigit @ 2018-01-12 18:34 UTC (permalink / raw)
  To: maciej.czekaj, dev; +Cc: shahafs, thomas

On 1/3/2018 1:12 PM, maciej.czekaj@caviumnetworks.com wrote:
> From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> 
> This patch removes all references to old-style offload API
> replacing them with new offload flags.
> 
> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>

<...>

>  
>  	dev_info->default_txconf = (struct rte_eth_txconf) {
>  		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
> -		.txq_flags =
> -			ETH_TXQ_FLAGS_NOMULTSEGS  |
> -			ETH_TXQ_FLAGS_NOREFCOUNT  |
> -			ETH_TXQ_FLAGS_NOMULTMEMP  |
> -			ETH_TXQ_FLAGS_NOVLANOFFL  |
> -			ETH_TXQ_FLAGS_NOXSUMSCTP,
> +		.txq_flags = ETH_TXQ_FLAGS_IGNORE,

I am not sure about this, Shahafs may comment better, shouldn't application
decide to set "ETH_TXQ_FLAGS_IGNORE" or not, instead of having this in default
configuration?

<...>

> +	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
> +		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
> +		      "requested 0x%lx supported 0x%lx\n",
> +		      conf_tx_offloads, tx_offload_capa);

This is broken for 32bits, using PRIx64 instead of "lx" makes your code more
portable.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-12 18:34 ` Ferruh Yigit
@ 2018-01-15 14:49   ` Maciej Czekaj
  2018-01-15 17:45     ` Ferruh Yigit
  2018-01-16 13:50   ` Shahaf Shuler
  1 sibling, 1 reply; 9+ messages in thread
From: Maciej Czekaj @ 2018-01-15 14:49 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: shahafs, thomas





-- Original message --
> On 1/3/2018 1:12 PM, maciej.czekaj@caviumnetworks.com wrote:
>> From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>>
>> This patch removes all references to old-style offload API
>> replacing them with new offload flags.
>>
>> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> <...>
>
>>   
>>   	dev_info->default_txconf = (struct rte_eth_txconf) {
>>   		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
>> -		.txq_flags =
>> -			ETH_TXQ_FLAGS_NOMULTSEGS  |
>> -			ETH_TXQ_FLAGS_NOREFCOUNT  |
>> -			ETH_TXQ_FLAGS_NOMULTMEMP  |
>> -			ETH_TXQ_FLAGS_NOVLANOFFL  |
>> -			ETH_TXQ_FLAGS_NOXSUMSCTP,
>> +		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
> I am not sure about this, Shahafs may comment better, shouldn't application
> decide to set "c" or not, instead of having this in default
> configuration?
I think of it as a safeguard against a legacy application that is using 
old-style txq_flags.

There is an asymmetry in the API, since
  - rte_eth_tx_queue setup() and rte_eth_rx_queue_setup() convert flags
  - rte_eth_dev_info_get() - does not convert them.

The scenario leading to issues is as follows:

1.Application reads default txq_flags from the rte_eth_dev_info_get().
  - dev_info.txconf.offloads contains flags but it is ignored by legacy code
  - dev_info.txq_flags may be:
       a) txq_flags == 0
       b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE
       c) txq_flags == new-style flags converted to old-style flags


2. Application uses default txq_flags field to rte_eth_tx_queue_setup(). 
Now, depending on txq_flags:

   a) txq_flags == 0, ethdev layer __clears__ all offloads, see *
   b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE, ethdev layer converts offloads 
to txq_flags, but leaves the original offloads, so the PMD can still use them
   c) txq_flags == old-style flags, ethdev layer converts txq_flags to 
offloads, destroying default offloads


* relevant code snippet from rte_eth_tx_queue_setup() with comments:

     if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
         // ==> converts offloads to txq_flags but LEAVES offloads, too
         rte_eth_convert_txq_offloads(tx_conf->offloads,
                          &local_conf.txq_flags);
         /* Keep the ignore flag. */
         local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
     } else {
         // ==> converts txq_flags to offloads but DESTROYS original 
offloads
rte_eth_convert_txq_flags(tx_conf->txq_flags,
                       &local_conf.offloads);
     }


So, out of 3 options:

a) does not work for legacy code
b) will work for legacy code
c) will work but defeats the purpose of removing old-style flags, since 
dev_info() callback has to setup both old-style and new-style default flags

I chose b) as the simplest way to work-around the issue. I could post a 
patch to ethdev API if you think it is important.

>
> <...>
>
>> +	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
>> +		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
>> +		      "requested 0x%lx supported 0x%lx\n",
>> +		      conf_tx_offloads, tx_offload_capa);
> This is broken for 32bits, using PRIx64 instead of "lx" makes your code more
> portable.
Will fix in v2.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-15 14:49   ` Maciej Czekaj
@ 2018-01-15 17:45     ` Ferruh Yigit
  2018-01-16  9:32       ` Maciej Czekaj
  0 siblings, 1 reply; 9+ messages in thread
From: Ferruh Yigit @ 2018-01-15 17:45 UTC (permalink / raw)
  To: Maciej Czekaj, dev; +Cc: shahafs, thomas

On 1/15/2018 2:49 PM, Maciej Czekaj wrote:
> 
> 
> 
> 
> -- Oryginal message --
>> On 1/3/2018 1:12 PM, maciej.czekaj@caviumnetworks.com wrote:
>>> From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>>>
>>> This patch removes all references to old-style offload API
>>> replacing them with new offload flags.
>>>
>>> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>> <...>
>>
>>>   
>>>   	dev_info->default_txconf = (struct rte_eth_txconf) {
>>>   		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
>>> -		.txq_flags =
>>> -			ETH_TXQ_FLAGS_NOMULTSEGS  |
>>> -			ETH_TXQ_FLAGS_NOREFCOUNT  |
>>> -			ETH_TXQ_FLAGS_NOMULTMEMP  |
>>> -			ETH_TXQ_FLAGS_NOVLANOFFL  |
>>> -			ETH_TXQ_FLAGS_NOXSUMSCTP,
>>> +		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
>> I am not sure about this, Shahafs may comment better, shouldn't application
>> decide to set "c" or not, instead of having this in default
>> configuration?
> I think of it as a safeguard against a legacy application that is using 
> old-style txq_flags.
> 
> There is an assymetry in API, since
>   - rte_eth_tx_queue setup() and rte_eth_rx_queue_setup() convert flags
>   - rte_eth_dev_info_get() - does not convert them.
> 
> The scenario leading to issues is as follows:
> 
> 1.Application reads default txq_flags from the rte_eth_dev_info_get().
>   - dev_info.txconf.offloads contains flags but it is ignored by legacy code
>   - dev_info.txq_flags may be:
>        a) txq_flags == 0
>        b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE
>        c) txq_flags == new-style flags converted to old-style flags
> 
> 
> 2. Application uses default txq_flags field to rte_eth_tx_queue_setup(). 
> Now, depending on txq_flags:
> 
>    a) txq_flags == 0, ethdev layer __clears__ all offloads, see *
>    b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE, ethdev layer converts offloads 
> to txq_flags, but leaves orignal offloads, so PMD can still use them
>    c) txq_flags == old-style flags, ethdev layer converts txq_flags to 
> offloads, destroying default offloads
> 
> 
> * relevant code snippet from rte_eth_tx_queue_setup() with comments:
> 
>      if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
>          // ==> converts offloads to txq_flags but LEAVES offloads, too
>          rte_eth_convert_txq_offloads(tx_conf->offloads,
>                           &local_conf.txq_flags);
>          /* Keep the ignore flag. */
>          local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
>      } else {
>          // ==> converts txq_flags to offloads but DESTROYS original 
> offloads
> rte_eth_convert_txq_flags(tx_conf->txq_flags,
>                        &local_conf.offloads);
>      }
> 
> 
> So, out of 3 options:
> 
> a) does not work for legacy code
> b) will work for legacy code
> c) will work but defeats the purpose of removing old-style flags, since 
> dev_info() callback has to setup both old-style and new-style default flags
> 
> I chose b) as the simplest way to work-around the issue. I could post a 
> patch to ethdev API if you think it is important.

What I understand is txq_flags should be supported because of legacy apps. That
is why ethdev layer converts old txq_flags to offloads when new
ETH_TXQ_FLAGS_IGNORE flag is missing. So that PMD can only use "offloads"
variable for both legacy and new applications.

To support the case application uses defaults from PMD, the one you mentioned
above, I think we should go with option c). For implementation you can use only
new "offloads", so it is not both old-style and new-style, just new-style, but
as default yes should keep both.

And new applications will set ETH_TXQ_FLAGS_IGNORE to say "offloads" has valid
information. Currently this also converted to the txq_flags but this will go
away when all PMDs support new method.

Think about a case where a legacy application gets defaults from the PMD and sets a
few more values in txq_flags and configures the device. When you set
ETH_TXQ_FLAGS_IGNORE as part of defaults, the ethdev layer will think only
"offloads" is valid and it will overwrite the txq_flags value with the offloads one,
and the updates to txq_flags will be lost.

At one point ETH_TXQ_FLAGS_IGNORE will also go away and applications also will
need to support the new method. When it is removed then we can get rid of the
txq_flags defaults from PMDs; until then I guess we need to live with them.

> 
>>
>> <...>
>>
>>> +	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
>>> +		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
>>> +		      "requested 0x%lx supported 0x%lx\n",
>>> +		      conf_tx_offloads, tx_offload_capa);
>> This is broken for 32bits, using PRIx64 instead of "lx" makes your code more
>> portable.
> Will fix in v2.
> 
> 
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-15 17:45     ` Ferruh Yigit
@ 2018-01-16  9:32       ` Maciej Czekaj
  2018-01-16 10:10         ` Ferruh Yigit
  0 siblings, 1 reply; 9+ messages in thread
From: Maciej Czekaj @ 2018-01-16  9:32 UTC (permalink / raw)
  To: Ferruh Yigit, dev; +Cc: shahafs, thomas





-- Original message --
> On 1/15/2018 2:49 PM, Maciej Czekaj wrote:
>>
>>
>>
>> -- Oryginal message --
>>> On 1/3/2018 1:12 PM, maciej.czekaj@caviumnetworks.com wrote:
>>>> From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>>>>
>>>> This patch removes all references to old-style offload API
>>>> replacing them with new offload flags.
>>>>
>>>> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>>> <...>
>>>
>>>>    
>>>>    	dev_info->default_txconf = (struct rte_eth_txconf) {
>>>>    		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
>>>> -		.txq_flags =
>>>> -			ETH_TXQ_FLAGS_NOMULTSEGS  |
>>>> -			ETH_TXQ_FLAGS_NOREFCOUNT  |
>>>> -			ETH_TXQ_FLAGS_NOMULTMEMP  |
>>>> -			ETH_TXQ_FLAGS_NOVLANOFFL  |
>>>> -			ETH_TXQ_FLAGS_NOXSUMSCTP,
>>>> +		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
>>> I am not sure about this, Shahafs may comment better, shouldn't application
>>> decide to set "c" or not, instead of having this in default
>>> configuration?
>> I think of it as a safeguard against a legacy application that is using
>> old-style txq_flags.
>>
>> There is an assymetry in API, since
>>    - rte_eth_tx_queue setup() and rte_eth_rx_queue_setup() convert flags
>>    - rte_eth_dev_info_get() - does not convert them.
>>
>> The scenario leading to issues is as follows:
>>
>> 1.Application reads default txq_flags from the rte_eth_dev_info_get().
>>    - dev_info.txconf.offloads contains flags but it is ignored by legacy code
>>    - dev_info.txq_flags may be:
>>         a) txq_flags == 0
>>         b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE
>>         c) txq_flags == new-style flags converted to old-style flags
>>
>>
>> 2. Application uses default txq_flags field to rte_eth_tx_queue_setup().
>> Now, depending on txq_flags:
>>
>>     a) txq_flags == 0, ethdev layer __clears__ all offloads, see *
>>     b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE, ethdev layer converts offloads
>> to txq_flags, but leaves orignal offloads, so PMD can still use them
>>     c) txq_flags == old-style flags, ethdev layer converts txq_flags to
>> offloads, destroying default offloads
>>
>>
>> * relevant code snippet from rte_eth_tx_queue_setup() with comments:
>>
>>       if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
>>           // ==> converts offloads to txq_flags but LEAVES offloads, too
>>           rte_eth_convert_txq_offloads(tx_conf->offloads,
>>                            &local_conf.txq_flags);
>>           /* Keep the ignore flag. */
>>           local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
>>       } else {
>>           // ==> converts txq_flags to offloads but DESTROYS original
>> offloads
>> rte_eth_convert_txq_flags(tx_conf->txq_flags,
>>                         &local_conf.offloads);
>>       }
>>
>>
>> So, out of 3 options:
>>
>> a) does not work for legacy code
>> b) will work for legacy code
>> c) will work but defeats the purpose of removing old-style flags, since
>> dev_info() callback has to setup both old-style and new-style default flags
>>
>> I chose b) as the simplest way to work-around the issue. I could post a
>> patch to ethdev API if you think it is important.
> What I understand is txq_flags should be supported because of legacy apps. That
> is why ethdev layer converts old txq_flags to offloads when new
> ETH_TXQ_FLAGS_IGNORE flag is missing. So that PMD can only use "offloads"
> variable for both legacy and new applications.
>
> To support the case application uses defaults from PMD, the one you mentioned
> above, I think we should go with option c). For implementation you can use only
> new "offloads", so it is not both old-style and new-style, just new-style, but
> as default yes should keep both.
>
> And new applications will set ETH_TXQ_FLAGS_IGNORE to say "offloads" has valid
> information. Currently this also converted to the txq_flags but this will go
> away when all PMDs support new method.
>
> Think  about a case where a legacy application gets defaults from PMD and sets a
> few more values in txq_flags and configure the device. When you set
> ETH_TXQ_FLAGS_IGNORE as part of defaults, ethdev layer will think only
> "offloads" is valid and it will overwrite the txq_flags value with offloads one
> and the updates to the txq_flags will be lost.
>
> At one point ETH_TXQ_FLAGS_IGNORE will also go away and applications also will
> need to support new method. When it is removed than we can get rid of the
> txq_flags defaults from PMDs until than I guess we need to live with them.

So you suggest that the dev_info callback should provide default txq_flags
for the moment?

E.g.

.txq_flags =
	ETH_TXQ_FLAGS_NOMULTSEGS  |
	ETH_TXQ_FLAGS_NOREFCOUNT  |
	ETH_TXQ_FLAGS_NOMULTMEMP  |
	ETH_TXQ_FLAGS_NOVLANOFFL  |
	ETH_TXQ_FLAGS_NOXSUMSCTP,


That is OK with me. We'll wipe it out later when it all goes away.

Will fix in v2.

>>> <...>
>>>
>>>> +	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
>>>> +		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
>>>> +		      "requested 0x%lx supported 0x%lx\n",
>>>> +		      conf_tx_offloads, tx_offload_capa);
>>> This is broken for 32bits, using PRIx64 instead of "lx" makes your code more
>>> portable.
>> Will fix in v2.
>>
>>
>>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-16  9:32       ` Maciej Czekaj
@ 2018-01-16 10:10         ` Ferruh Yigit
  0 siblings, 0 replies; 9+ messages in thread
From: Ferruh Yigit @ 2018-01-16 10:10 UTC (permalink / raw)
  To: Maciej Czekaj, dev; +Cc: shahafs, thomas

On 1/16/2018 9:32 AM, Maciej Czekaj wrote:
> 
> 
> 
> 
> -- Original message --
>> On 1/15/2018 2:49 PM, Maciej Czekaj wrote:
>>>
>>>
>>>
>>> -- Original message --
>>>> On 1/3/2018 1:12 PM, maciej.czekaj@caviumnetworks.com wrote:
>>>>> From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>>>>>
>>>>> This patch removes all references to old-style offload API
>>>>> replacing them with new offload flags.
>>>>>
>>>>> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
>>>> <...>
>>>>
>>>>>    
>>>>>    	dev_info->default_txconf = (struct rte_eth_txconf) {
>>>>>    		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
>>>>> -		.txq_flags =
>>>>> -			ETH_TXQ_FLAGS_NOMULTSEGS  |
>>>>> -			ETH_TXQ_FLAGS_NOREFCOUNT  |
>>>>> -			ETH_TXQ_FLAGS_NOMULTMEMP  |
>>>>> -			ETH_TXQ_FLAGS_NOVLANOFFL  |
>>>>> -			ETH_TXQ_FLAGS_NOXSUMSCTP,
>>>>> +		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
>>>> I am not sure about this, Shahafs may comment better, shouldn't application
>>>> decide to set "c" or not, instead of having this in default
>>>> configuration?
>>> I think of it as a safeguard against a legacy application that is using
>>> old-style txq_flags.
>>>
>>> There is an asymmetry in the API, since
>>>    - rte_eth_tx_queue setup() and rte_eth_rx_queue_setup() convert flags
>>>    - rte_eth_dev_info_get() - does not convert them.
>>>
>>> The scenario leading to issues is as follows:
>>>
>>> 1.Application reads default txq_flags from the rte_eth_dev_info_get().
>>>    - dev_info.txconf.offloads contains flags but it is ignored by legacy code
>>>    - dev_info.txq_flags may be:
>>>         a) txq_flags == 0
>>>         b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE
>>>         c) txq_flags == new-style flags converted to old-style flags
>>>
>>>
>>> 2. Application uses default txq_flags field to rte_eth_tx_queue_setup().
>>> Now, depending on txq_flags:
>>>
>>>     a) txq_flags == 0, ethdev layer __clears__ all offloads, see *
>>>     b) txq_flags ==  ETH_TXQ_FLAGS_IGNORE, ethdev layer converts offloads
>>> to txq_flags, but leaves the original offloads, so PMD can still use them
>>>     c) txq_flags == old-style flags, ethdev layer converts txq_flags to
>>> offloads, destroying default offloads
>>>
>>>
>>> * relevant code snippet from rte_eth_tx_queue_setup() with comments:
>>>
>>>       if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
>>>           // ==> converts offloads to txq_flags but LEAVES offloads, too
>>>           rte_eth_convert_txq_offloads(tx_conf->offloads,
>>>                            &local_conf.txq_flags);
>>>           /* Keep the ignore flag. */
>>>           local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
>>>       } else {
>>>           // ==> converts txq_flags to offloads but DESTROYS original
>>> offloads
>>> rte_eth_convert_txq_flags(tx_conf->txq_flags,
>>>                         &local_conf.offloads);
>>>       }
>>>
>>>
>>> So, out of 3 options:
>>>
>>> a) does not work for legacy code
>>> b) will work for legacy code
>>> c) will work but defeats the purpose of removing old-style flags, since
>>> dev_info() callback has to setup both old-style and new-style default flags
>>>
>>> I chose b) as the simplest way to work-around the issue. I could post a
>>> patch to ethdev API if you think it is important.
>> What I understand is txq_flags should be supported because of legacy apps. That
>> is why ethdev layer converts old txq_flags to offloads when new
>> ETH_TXQ_FLAGS_IGNORE flag is missing. So that PMD can only use "offloads"
>> variable for both legacy and new applications.
>>
>> To support the case application uses defaults from PMD, the one you mentioned
>> above, I think we should go with option c). For implementation you can use only
>> new "offloads", so it is not both old-style and new-style, just new-style, but
>> as default yes should keep both.
>>
>> And new applications will set ETH_TXQ_FLAGS_IGNORE to say "offloads" has valid
>> information. Currently this also converted to the txq_flags but this will go
>> away when all PMDs support new method.
>>
>> Think about a case where a legacy application gets defaults from PMD and sets a
>> few more values in txq_flags and configure the device. When you set
>> ETH_TXQ_FLAGS_IGNORE as part of defaults, ethdev layer will think only
>> "offloads" is valid and it will overwrite the txq_flags value with offloads one
>> and the updates to the txq_flags will be lost.
>>
>> At one point ETH_TXQ_FLAGS_IGNORE will also go away and applications also will
>> need to support new method. When it is removed then we can get rid of the
>> txq_flags defaults from PMDs; until then I guess we need to live with them.
> 
> So you suggest that dev_info callback should provide default txq_flags 
> for a moment?
> 
> E.g.
> 
> .txq_flags =
> 	ETH_TXQ_FLAGS_NOMULTSEGS  |
> 	ETH_TXQ_FLAGS_NOREFCOUNT  |
> 	ETH_TXQ_FLAGS_NOMULTMEMP  |
> 	ETH_TXQ_FLAGS_NOVLANOFFL  |
> 	ETH_TXQ_FLAGS_NOXSUMSCTP,
> 
> 
> That is OK with me. We'll wipe it out later when it all goes away.

Yes, thank you.

> 
> Will fix in v2.
> 
>>>> <...>
>>>>
>>>>> +	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
>>>>> +		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
>>>>> +		      "requested 0x%lx supported 0x%lx\n",
>>>>> +		      conf_tx_offloads, tx_offload_capa);
>>>> This is broken for 32bits, using PRIx64 instead of "lx" makes your code more
>>>> portable.
>>> Will fix in v2.
>>>
>>>
>>>
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-12 18:34 ` Ferruh Yigit
  2018-01-15 14:49   ` Maciej Czekaj
@ 2018-01-16 13:50   ` Shahaf Shuler
  1 sibling, 0 replies; 9+ messages in thread
From: Shahaf Shuler @ 2018-01-16 13:50 UTC (permalink / raw)
  To: Ferruh Yigit, maciej.czekaj, dev; +Cc: Thomas Monjalon

Friday, January 12, 2018 8:34 PM, Ferruh Yigit:
> On 1/3/2018 1:12 PM, maciej.czekaj@caviumnetworks.com wrote:
> > From: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> >
> > This patch removes all references to old-style offload API replacing
> > them with new offload flags.
> >
> > Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> 
> <...>
> 
> >
> >  	dev_info->default_txconf = (struct rte_eth_txconf) {
> >  		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
> > -		.txq_flags =
> > -			ETH_TXQ_FLAGS_NOMULTSEGS  |
> > -			ETH_TXQ_FLAGS_NOREFCOUNT  |
> > -			ETH_TXQ_FLAGS_NOMULTMEMP  |
> > -			ETH_TXQ_FLAGS_NOVLANOFFL  |
> > -			ETH_TXQ_FLAGS_NOXSUMSCTP,
> > +		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
> 
> I am not sure about this, Shahafs may comment better, shouldn't application
> decide to set "ETH_TXQ_FLAGS_IGNORE" or not, instead of having this in
> default configuration?

Yes Ferruh is correct here.
The ETH_TXQ_FLAGS_IGNORE should be set by application to notify the lower layer it is using the new Tx offloads API and therefore not to take into consideration the txq flags configuration.

PMD is not allowed to set it. 
Whether the PMD moved to the new API or not is agnostic to the application, and abstracted by helper functions on ethdev layer.

> 
> <...>
> 
> > +	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
> > +		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
> > +		      "requested 0x%lx supported 0x%lx\n",
> > +		      conf_tx_offloads, tx_offload_capa);
> 
> This is broken for 32bits, using PRIx64 instead of "lx" makes your code more
> portable.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v2] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-03 13:12 [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API maciej.czekaj
  2018-01-12 18:34 ` Ferruh Yigit
@ 2018-01-18 13:06 ` maciej.czekaj
  2018-01-18 16:41   ` Ferruh Yigit
  1 sibling, 1 reply; 9+ messages in thread
From: maciej.czekaj @ 2018-01-18 13:06 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, shahafs, thomas, Maciej Czekaj, Maciej Czekaj

From: Maciej Czekaj <maciej.czekaj@caviunetworks.com>

This patch removes all references to old-style offload API
replacing them with new offload flags.

Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
---
v2:
  * fixed number format from %lx to PRIx64
  * removed ETH_TXQ_FLAGS_IGNORE from txq_flags
  * restored backward-compatible txq_flags
---
 drivers/net/thunderx/nicvf_ethdev.c | 136 ++++++++++++++++++++++++------------
 drivers/net/thunderx/nicvf_ethdev.h |  14 ++++
 drivers/net/thunderx/nicvf_struct.h |   2 +-
 3 files changed, 106 insertions(+), 46 deletions(-)

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index b9a873f..c42ba30 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -172,6 +172,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 	size_t i;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -197,15 +198,15 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
 		return -EINVAL;
 
 	/* Update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+	rxmode->max_rx_pkt_len = (uint32_t)frame_size;
 	nic->mtu = mtu;
 
 	for (i = 0; i < nic->sqs_count; i++)
@@ -896,7 +897,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
 			multiseg = true;
 			break;
 		}
@@ -935,9 +936,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			 const struct rte_eth_txconf *tx_conf)
 {
 	uint16_t tx_free_thresh;
-	uint8_t is_single_pool;
+	bool is_single_pool;
 	struct nicvf_txq *txq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
+	uint64_t conf_offloads, offload_capa, unsupported_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -951,6 +953,17 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
+	conf_offloads = tx_conf->offloads;
+	offload_capa = NICVF_TX_OFFLOAD_CAPA;
+
+	unsupported_offloads = conf_offloads & ~offload_capa;
+	if (unsupported_offloads) {
+		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
+		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+		      unsupported_offloads, conf_offloads, offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* Tx deferred start is not supported */
 	if (tx_conf->tx_deferred_start) {
 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1000,11 +1013,11 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	txq->nic = nic;
 	txq->queue_id = qidx;
 	txq->tx_free_thresh = tx_free_thresh;
-	txq->txq_flags = tx_conf->txq_flags;
 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
-				txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+	txq->offloads = conf_offloads;
+
+	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
 	/* Choose optimum free threshold value for multipool case */
 	if (!is_single_pool) {
@@ -1035,9 +1048,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_tx_queue_reset(txq);
 
-	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
+			" phys=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
-			txq->phys);
+			txq->phys, txq->offloads);
 
 	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1263,6 +1277,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	uint16_t rx_free_thresh;
 	struct nicvf_rxq *rxq;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
+	uint64_t conf_offloads, offload_capa, unsupported_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1276,6 +1291,24 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
 		socket_id, nic->node);
 
+
+	conf_offloads = rx_conf->offloads;
+
+	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
+	}
+
+	offload_capa = NICVF_RX_OFFLOAD_CAPA;
+	unsupported_offloads = conf_offloads & ~offload_capa;
+
+	if (unsupported_offloads) {
+		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
+		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+		      unsupported_offloads, conf_offloads, offload_capa);
+		return -ENOTSUP;
+	}
+
 	/* Mempool memory must be contiguous, so must be one memory segment*/
 	if (mp->nb_mem_chunks != 1) {
 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1356,9 +1389,10 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	nicvf_rx_queue_reset(rxq);
 
-	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
+	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
+			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-			rte_mempool_avail_count(mp), rxq->phys);
+			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
 
 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1392,13 +1426,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_vfs = pci_dev->max_vfs;
 
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_TSO     |
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
 
 	dev_info->reta_size = nic->rss_info.rss_size;
 	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
@@ -1409,6 +1440,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1419,6 +1451,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			ETH_TXQ_FLAGS_NOMULTMEMP  |
 			ETH_TXQ_FLAGS_NOVLANOFFL  |
 			ETH_TXQ_FLAGS_NOXSUMSCTP,
+		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+			DEV_TX_OFFLOAD_UDP_CKSUM          |
+			DEV_TX_OFFLOAD_TCP_CKSUM,
 	};
 }
 
@@ -1459,6 +1495,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
+	bool vlan_strip;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1578,7 +1615,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_VLAN_STRIP);
+	nicvf_vlan_hw_strip(nic, vlan_strip);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1706,11 +1745,11 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					    2 * VLAN_TAG_SIZE > buffsz)
 		dev->data->scattered_rx = 1;
-	if (rx_conf->enable_scatter)
+	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
 		dev->data->scattered_rx = 1;
 
 	/* Setup MTU based on max_rx_pkt_len or default */
-	mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
 		dev->data->dev_conf.rxmode.max_rx_pkt_len
 			-  ETHER_HDR_LEN - ETHER_CRC_LEN
 		: ETHER_MTU;
@@ -1884,6 +1923,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &conf->txmode;
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 	uint8_t cqcount;
+	uint64_t conf_rx_offloads, rx_offload_capa;
+	uint64_t conf_tx_offloads, tx_offload_capa;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1892,44 +1933,49 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (txmode->mq_mode) {
-		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
-		return -EINVAL;
-	}
+	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
+	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
 
-	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-		rxmode->mq_mode != ETH_MQ_RX_RSS) {
-		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
-		return -EINVAL;
+	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
+		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
+		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+		      conf_tx_offloads, tx_offload_capa);
+		return -ENOTSUP;
 	}
 
-	if (!rxmode->hw_strip_crc) {
-		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
-		rxmode->hw_strip_crc = 1;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
 	}
 
-	if (rxmode->hw_ip_checksum) {
-		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
-		rxmode->hw_ip_checksum = 0;
+	conf_rx_offloads = rxmode->offloads;
+	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+
+	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
+		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
+		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+		      conf_rx_offloads, rx_offload_capa);
+		return -ENOTSUP;
 	}
 
-	if (rxmode->split_hdr_size) {
-		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
-		return -EINVAL;
+	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (rxmode->hw_vlan_filter) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
+	if (txmode->mq_mode) {
+		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
 		return -EINVAL;
 	}
 
-	if (rxmode->hw_vlan_extend) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
+	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+		rxmode->mq_mode != ETH_MQ_RX_RSS) {
+		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
 		return -EINVAL;
 	}
 
-	if (rxmode->enable_lro) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
+	if (rxmode->split_hdr_size) {
+		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index fd20d2c..da30a9f 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -29,6 +29,20 @@
 	ETH_RSS_GENEVE | \
 	ETH_RSS_NVGRE)
 
+#define NICVF_TX_OFFLOAD_CAPA ( \
+	DEV_TX_OFFLOAD_IPV4_CKSUM       | \
+	DEV_TX_OFFLOAD_UDP_CKSUM        | \
+	DEV_TX_OFFLOAD_TCP_CKSUM        | \
+	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+	DEV_TX_OFFLOAD_MBUF_FAST_FREE   | \
+	DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define NICVF_RX_OFFLOAD_CAPA ( \
+	DEV_RX_OFFLOAD_VLAN_STRIP  | \
+	DEV_RX_OFFLOAD_CRC_STRIP   | \
+	DEV_RX_OFFLOAD_JUMBO_FRAME | \
+	DEV_RX_OFFLOAD_SCATTER)
+
 #define NICVF_DEFAULT_RX_FREE_THRESH    224
 #define NICVF_DEFAULT_TX_FREE_THRESH    224
 #define NICVF_TX_FREE_MPOOL_THRESH      16
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 429d290..6d0a6b2 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -39,7 +39,7 @@ struct nicvf_txq {
 	uint32_t tail;
 	int32_t xmit_bufs;
 	uint32_t qlen_mask;
-	uint32_t txq_flags;
+	uint64_t offloads;
 	uint16_t queue_id;
 	uint16_t tx_free_thresh;
 } __rte_cache_aligned;
-- 
2.7.4

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v2] net/thunderx: Convert ThunderX VNIC PMD to new offload API
  2018-01-18 13:06 ` [dpdk-dev] [PATCH v2] " maciej.czekaj
@ 2018-01-18 16:41   ` Ferruh Yigit
  0 siblings, 0 replies; 9+ messages in thread
From: Ferruh Yigit @ 2018-01-18 16:41 UTC (permalink / raw)
  To: maciej.czekaj, dev; +Cc: shahafs, thomas, Maciej Czekaj

On 1/18/2018 1:06 PM, maciej.czekaj@caviumnetworks.com wrote:
> From: Maciej Czekaj <maciej.czekaj@caviunetworks.com>

Author fixed as "Maciej Czekaj <maciej.czekaj@caviumnetworks.com>"

s/caviunetworks/caviumnetworks

> 
> This patch removes all references to old-style offload API
> replacing them with new offload flags.
> 
> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>

Applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2018-01-18 16:41 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-03 13:12 [dpdk-dev] [PATCH] net/thunderx: Convert ThunderX VNIC PMD to new offload API maciej.czekaj
2018-01-12 18:34 ` Ferruh Yigit
2018-01-15 14:49   ` Maciej Czekaj
2018-01-15 17:45     ` Ferruh Yigit
2018-01-16  9:32       ` Maciej Czekaj
2018-01-16 10:10         ` Ferruh Yigit
2018-01-16 13:50   ` Shahaf Shuler
2018-01-18 13:06 ` [dpdk-dev] [PATCH v2] " maciej.czekaj
2018-01-18 16:41   ` Ferruh Yigit

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).