From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <wei.dai@intel.com>
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115])
 by dpdk.org (Postfix) with ESMTP id 1E0F73250
 for <dev@dpdk.org>; Thu,  1 Feb 2018 15:09:50 +0100 (CET)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga005.jf.intel.com ([10.7.209.41])
 by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 01 Feb 2018 06:09:49 -0800
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.46,444,1511856000"; d="scan'208";a="197886454"
Received: from dpdk6.bj.intel.com ([172.16.182.87])
 by orsmga005.jf.intel.com with ESMTP; 01 Feb 2018 06:09:48 -0800
From: Wei Dai <wei.dai@intel.com>
To: thomas@monjalon.net,
	shahafs@mellanox.com
Cc: dev@dpdk.org,
	Wei Dai <wei.dai@intel.com>
Date: Thu,  1 Feb 2018 21:53:06 +0800
Message-Id: <1517493186-6687-1-git-send-email-wei.dai@intel.com>
X-Mailer: git-send-email 2.7.5
Subject: [dpdk-dev] [PATCH] ethdev: check consistency of per-port offloads
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Thu, 01 Feb 2018 14:09:51 -0000

A per-port offloading feature must be enabled or disabled
at the same time in both rte_eth_dev_configure() and
rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup().
This patch checks that each per-port offloading flag has
the same configuration in rte_eth_dev_configure() and in
rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(), and
fails queue setup with -ENOTSUP when it does not.
Doing the check once in the common rte_ethdev layer avoids
duplicating it in every underlying PMD.
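
For example (illustrative values): if rx_offload_capa is 0x7 and
rx_queue_offload_capa is 0x3, the pure per-port capability mask is
0x7 ^ 0x3 = 0x4, and queue setup is rejected whenever the queue
offloads and the port offloads differ in that 0x4 bit.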

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
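Note for reviewers (illustration only, not part of the patch): below is
a minimal sketch of how an application keeps port-level and queue-level
offloads consistent under the new check. It assumes a device that
reports DEV_RX_OFFLOAD_VLAN_FILTER in rx_offload_capa but not in
rx_queue_offload_capa (i.e. a pure per-port offload); the function name
and the mempool "mp" are placeholders.

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Enable a pure per-port Rx offload consistently at port and queue
 * level so that the check added by this patch passes.
 */
static int
setup_vlan_filter_port(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };
	struct rte_eth_rxconf rxq_conf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Use the offloads field rather than the legacy bitfield flags. */
	conf.rxmode.ignore_offload_bitfield = 1;
	/* The per-port offload is enabled at configure time ... */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* ... and must be carried into the queue-level offloads,
	 * otherwise rte_eth_rx_queue_setup() now returns -ENOTSUP.
	 */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxq_conf, mp);
}
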
 lib/librte_ether/rte_ethdev.c | 72 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 78bed1a..7945890 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1404,6 +1404,44 @@ rte_eth_dev_is_removed(uint16_t port_id)
 	return ret;
 }
 
+/**
+ * Check that per-queue offloading settings are consistent with per-port
+ * ones: a pure per-port offload (present in all_offload_capa but not in
+ * queue_offload_capa) must be configured identically in both places.
+ * @param queue_offloads
+ *   Offloads passed to rte_eth_rx_queue_setup() or rte_eth_tx_queue_setup()
+ * @param port_offloads
+ *   Rx or Tx offloads passed to rte_eth_dev_configure()
+ * @param queue_offload_capa
+ *   rx_queue_offload_capa or tx_queue_offload_capa in struct rte_eth_dev_info
+ *   obtained from rte_eth_dev_info_get()
+ * @param all_offload_capa
+ *   rx_offload_capa or tx_offload_capa in struct rte_eth_dev_info
+ *   obtained from rte_eth_dev_info_get()
+ * @return Nonzero if the queue offloading settings are valid
+ */
+static int
+rte_eth_check_queue_offloads(uint64_t queue_offloads,
+			     uint64_t port_offloads,
+			     uint64_t queue_offload_capa,
+			     uint64_t all_offload_capa)
+{
+	uint64_t pure_port_capa = all_offload_capa ^ queue_offload_capa;
+	/* Queue and port settings must agree on the pure per-port offloads */
+	return !((port_offloads ^ queue_offloads) & pure_port_capa);
+}
+
+static int
+rte_eth_check_rx_queue_offloads(uint64_t rx_queue_offloads,
+				const struct rte_eth_rxmode *rxmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(rx_queue_offloads,
+					    rxmode->offloads,
+					    dev_info->rx_queue_offload_capa,
+					    dev_info->rx_offload_capa);
+}
+
 int
 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		       uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1446,6 +1484,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 				(int) sizeof(struct rte_pktmbuf_pool_private));
 		return -ENOSPC;
 	}
+
 	mbp_buf_size = rte_pktmbuf_data_room_size(mp);
 
 	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
@@ -1495,6 +1534,17 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_rx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.rxmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("%p : Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64 "\n",
+			(void *)dev, local_conf.offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1555,6 +1605,17 @@ rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
 	*txq_flags = flags;
 }
 
+static int
+rte_eth_check_tx_queue_offloads(uint64_t tx_queue_offloads,
+				const struct rte_eth_txmode *txmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(tx_queue_offloads,
+					    txmode->offloads,
+					    dev_info->tx_queue_offload_capa,
+					    dev_info->tx_offload_capa);
+}
+
 int
 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		       uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1622,6 +1683,17 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 					  &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_tx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.txmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("%p : Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64 "\n",
+			(void *)dev, local_conf.offloads,
+			dev->data->dev_conf.txmode.offloads,
+			dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5