DPDK patches and discussions
From: Shahaf Shuler <shahafs@mellanox.com>
To: Rafal Kozik <rk@semihalf.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: "mw@semihalf.com" <mw@semihalf.com>,
	"mk@semihalf.com" <mk@semihalf.com>,
	 "gtzalik@amazon.com" <gtzalik@amazon.com>,
	"evgenys@amazon.com" <evgenys@amazon.com>,
	"matua@amazon.com" <matua@amazon.com>,
	"igorch@amazon.com" <igorch@amazon.com>
Subject: Re: [dpdk-dev] [PATCH 1/2] net/ena: convert to new Tx offloads API
Date: Wed, 17 Jan 2018 06:56:26 +0000
Message-ID: <VI1PR05MB3149A725C6AE527E388A9A36C3E90@VI1PR05MB3149.eurprd05.prod.outlook.com>
In-Reply-To: <1516103563-9275-2-git-send-email-rk@semihalf.com>

Tuesday, January 16, 2018 1:53 PM, Rafal Kozik:
> Subject: [dpdk-dev] [PATCH 1/2] net/ena: convert to new Tx offloads API
> 
> Ethdev Tx offloads API has changed since:
> 
> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
> 
> This commit supports the new Tx offloads API. Queue configuration is stored
> in ena_ring.offloads. When preparing mbufs for Tx, offloads are applied only
> if the appropriate flags in this field are set.
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>
> ---
>  drivers/net/ena/ena_ethdev.c | 73 +++++++++++++++++++++++++++++++++++---------
>  drivers/net/ena/ena_ethdev.h |  3 ++
>  2 files changed, 61 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
> index 22db895..6473776 100644
> --- a/drivers/net/ena/ena_ethdev.c
> +++ b/drivers/net/ena/ena_ethdev.c
> @@ -164,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
>  #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
>  #define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
> 
> +#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
> +			DEV_TX_OFFLOAD_UDP_CKSUM |\
> +			DEV_TX_OFFLOAD_IPV4_CKSUM |\
> +			DEV_TX_OFFLOAD_TCP_TSO)
> +#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
> +		       PKT_TX_IP_CKSUM |\
> +		       PKT_TX_TCP_SEG)
> +
>  /** Vendor ID used by Amazon devices */
>  #define PCI_VENDOR_ID_AMAZON 0x1D0F
>  /** Amazon devices */
> @@ -227,6 +235,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
>  			      struct rte_eth_rss_reta_entry64 *reta_conf,
>  			      uint16_t reta_size);
>  static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
> +static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
> +					      uint64_t offloads);
> 
>  static const struct eth_dev_ops ena_dev_ops = {
>  	.dev_configure        = ena_dev_configure,
> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
>  }
> 
>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
> -				       struct ena_com_tx_ctx *ena_tx_ctx)
> +				       struct ena_com_tx_ctx *ena_tx_ctx,
> +				       uint64_t queue_offloads)
>  {
>  	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
> 
> -	if (mbuf->ol_flags &
> -	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
> +	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
> +	    (queue_offloads & QUEUE_OFFLOADS)) {
>  		/* check if TSO is required */
> -		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
> +		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>  			ena_tx_ctx->tso_enable = true;
> 
>  			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>  		}
> 
>  		/* check if L3 checksum is needed */
> -		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
> +		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>  			ena_tx_ctx->l3_csum_enable = true;
> 
>  		if (mbuf->ol_flags & PKT_TX_IPV6) {
> @@ -310,19 +323,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
>  		}
> 
>  		/* check if L4 checksum is needed */
> -		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
> -		case PKT_TX_TCP_CKSUM:
> +		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
> +		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
>  			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
>  			ena_tx_ctx->l4_csum_enable = true;
> -			break;
> -		case PKT_TX_UDP_CKSUM:
> +		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
> +			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
>  			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
>  			ena_tx_ctx->l4_csum_enable = true;
> -			break;
> -		default:
> +		} else {
>  			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
>  			ena_tx_ctx->l4_csum_enable = false;
> -			break;
>  		}
> 
>  		ena_meta->mss = mbuf->tso_segsz;
> @@ -945,7 +956,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>  			      uint16_t queue_idx,
>  			      uint16_t nb_desc,
>  			      __rte_unused unsigned int socket_id,
> -			      __rte_unused const struct rte_eth_txconf *tx_conf)
> +			      const struct rte_eth_txconf *tx_conf)
>  {
>  	struct ena_com_create_io_ctx ctx =
>  		/* policy set to _HOST just to satisfy icc compiler */
> @@ -982,6 +993,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>  		return -EINVAL;
>  	}
> 
> +	if (!ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
> +		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
> +		return -EINVAL;
> +	}

Here it is better to also check ETH_TXQ_FLAGS_IGNORE; see the sketch below.
If the application has not yet moved to the new API, it will not set any port Tx offloads, so for old applications the ena_are_tx_queue_offloads_allowed check is not necessary.
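Something along these lines (just an untested sketch; it assumes the check stays in ena_tx_queue_setup() and uses the txq_flags field of the tx_conf that is passed in):

	/* Only validate offloads for applications that opted in to the new
	 * API by setting ETH_TXQ_FLAGS_IGNORE; legacy callers that still
	 * rely on txq_flags are left untouched.
	 */
	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
		return -EINVAL;
	}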


> +
>  	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
> 
>  	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
> @@ -1036,6 +1052,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>  	for (i = 0; i < txq->ring_size; i++)
>  		txq->empty_tx_reqs[i] = i;
> 
> +	txq->offloads = tx_conf->offloads;
> +
>  	/* Store pointer to this queue in upper layer */
>  	txq->configured = 1;
>  	dev->data->tx_queues[queue_idx] = txq;
> @@ -1386,6 +1404,14 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>  {
>  	struct ena_adapter *adapter =
>  		(struct ena_adapter *)(dev->data->dev_private);
> +	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
> +
> +	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
> +		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
> +		    "requested 0x%lx supported 0x%lx\n",
> +		    tx_offloads, adapter->tx_supported_offloads);
> +		return -ENOTSUP;
> +	}


32-bit compilation will break with the above print; using PRIx64 is preferable (I changed it in the mlx series as well).
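For reference, a minimal sketch of the portable form (assuming inttypes.h is already available through the usual DPDK includes):

	/* PRIx64 keeps the format string correct for uint64_t on both
	 * 32-bit and 64-bit builds, unlike %lx.
	 */
	RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
	    "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
	    tx_offloads, adapter->tx_supported_offloads);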

> 
>  	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
>  	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
> @@ -1407,6 +1433,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
>  		break;
>  	}
> 
> +	adapter->tx_selected_offloads = tx_offloads;
>  	return 0;
>  }
> 
> @@ -1435,13 +1462,26 @@ static void ena_init_rings(struct ena_adapter *adapter)
>  	}
>  }
> 
> +static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
> +					      uint64_t offloads)
> +{
> +	uint64_t port_offloads = adapter->tx_selected_offloads;
> +
> +	/* Check if port supports all requested offloads.
> +	 * True if all offloads selected for queue are set for port.
> +	 */
> +	if ((offloads & port_offloads) != offloads)
> +		return false;
> +	return true;
> +}
> +
>  static void ena_infos_get(struct rte_eth_dev *dev,
>  			  struct rte_eth_dev_info *dev_info)
>  {
>  	struct ena_adapter *adapter;
>  	struct ena_com_dev *ena_dev;
>  	struct ena_com_dev_get_features_ctx feat;
> -	uint32_t rx_feat = 0, tx_feat = 0;
> +	uint64_t rx_feat = 0, tx_feat = 0;
>  	int rc = 0;
> 
>  	ena_assert_msg(dev->data != NULL, "Uninitialized device");
> @@ -1490,6 +1530,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>  	/* Inform framework about available features */
>  	dev_info->rx_offload_capa = rx_feat;
>  	dev_info->tx_offload_capa = tx_feat;
> +	dev_info->tx_queue_offload_capa = tx_feat;
> 
>  	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
>  	dev_info->max_rx_pktlen  = adapter->max_mtu;
> @@ -1498,6 +1539,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
>  	dev_info->max_rx_queues = adapter->num_queues;
>  	dev_info->max_tx_queues = adapter->num_queues;
>  	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
> +
> +	adapter->tx_supported_offloads = tx_feat;
>  }
> 
>  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> @@ -1714,7 +1757,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		} /* there's no else as we take advantage of memset zeroing */
> 
>  		/* Set TX offloads flags, if applicable */
> -		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
> +		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
> 
>  		if (unlikely(mbuf->ol_flags &
>  			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
> diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
> index be8bc9f..3e72777 100644
> --- a/drivers/net/ena/ena_ethdev.h
> +++ b/drivers/net/ena/ena_ethdev.h
> @@ -91,6 +91,7 @@ struct ena_ring {
>  	uint8_t tx_max_header_size;
>  	int configured;
>  	struct ena_adapter *adapter;
> +	uint64_t offloads;
>  } __rte_cache_aligned;
> 
>  enum ena_adapter_state {
> @@ -175,6 +176,8 @@ struct ena_adapter {
>  	struct ena_driver_stats *drv_stats;
>  	enum ena_adapter_state state;
> 
> +	uint64_t tx_supported_offloads;
> +	uint64_t tx_selected_offloads;
>  };
> 
>  #endif /* _ENA_ETHDEV_H_ */

Rest looks OK. 

Reviewed-by: Shahaf Shuler <shahafs@mellanox.com>


> --
> 2.7.4


Thread overview: 17+ messages
2018-01-16 11:52 [dpdk-dev] [PATCH 0/2] net/ena: convert to new " Rafal Kozik
2018-01-16 11:52 ` [dpdk-dev] [PATCH 1/2] net/ena: convert to new Tx " Rafal Kozik
2018-01-17  6:56   ` Shahaf Shuler [this message]
2018-01-17 18:58     ` Ferruh Yigit
2018-01-18  6:26       ` Shahaf Shuler
2018-01-18 14:49         ` Ferruh Yigit
2018-01-17  8:23   ` [dpdk-dev] [PATCH v2 " Rafal Kozik
2018-01-17 18:58     ` Ferruh Yigit
2018-01-18  9:18       ` Rafał Kozik
2018-01-18 14:49         ` Ferruh Yigit
2018-01-18 14:53     ` Ferruh Yigit
2018-01-18 14:59       ` Michał Krawczyk
2018-01-18 15:12         ` Ferruh Yigit
2018-01-16 11:52 ` [dpdk-dev] [PATCH 2/2] net/ena: convert to new Rx " Rafal Kozik
2018-01-17  6:57   ` Shahaf Shuler
2018-01-17  8:26   ` [dpdk-dev] [PATCH v2 " Rafal Kozik
2018-01-18 15:06     ` Michał Krawczyk
