DPDK patches and discussions
From: Rasesh Mody <rasesh.mody@qlogic.com>
To: <ferruh.yigit@intel.com>, <thomas.monjalon@6wind.com>,
	<bruce.richardson@intel.com>
Cc: <dev@dpdk.org>, <Dept-EngDPDKDev@qlogic.com>,
	Rasesh Mody <rasesh.mody@qlogic.com>
Subject: [dpdk-dev] [PATCH v4 24/32] net/qede/base: change Rx Tx queue start APIs
Date: Tue, 18 Oct 2016 21:11:38 -0700	[thread overview]
Message-ID: <1476850306-2141-25-git-send-email-rasesh.mody@qlogic.com> (raw)
In-Reply-To: <1476850306-2141-1-git-send-email-rasesh.mody@qlogic.com>

Changed the q_{rx,tx}_start APIs to take a common queue start parameters structure, struct ecore_queue_start_common_params, instead of individual per-queue arguments.
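
For illustration, a condensed before/after sketch of the PF Rx queue start
call on the ethdev side (drawn from the qede_rxtx.c hunk below; edev, fp, i,
q_params and p_phys_table are the driver's existing locals, and the unchanged
trailing arguments are elided):

    /* Before: every queue attribute is a separate argument */
    rc = qdev->ops->q_rx_start(edev, i, fp->rxq->queue_id,
                               0,                       /* vport_id */
                               fp->sb_info->igu_sb_id,  /* sb */
                               RX_PI,                   /* sb_index */
                               fp->rxq->rx_buf_size,
                               fp->rxq->rx_bd_ring.p_phys_addr,
                               p_phys_table, ...);

    /* After: the attributes are packed into one common structure */
    struct ecore_queue_start_common_params q_params;

    memset(&q_params, 0, sizeof(q_params));
    q_params.queue_id = i;
    q_params.vport_id = 0;
    q_params.sb = fp->sb_info->igu_sb_id;
    q_params.sb_idx = RX_PI;

    rc = qdev->ops->q_rx_start(edev, i, &q_params,
                               fp->rxq->rx_buf_size,
                               fp->rxq->rx_bd_ring.p_phys_addr,
                               p_phys_table, ...);

The Tx queue start path and the SR-IOV mailbox handlers in ecore_sriov.c are
converted the same way.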

Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
---
 drivers/net/qede/base/ecore_l2.c     | 131 +++++++++++++++--------------------
 drivers/net/qede/base/ecore_l2.h     |  26 ++-----
 drivers/net/qede/base/ecore_l2_api.h |  69 +++++++++---------
 drivers/net/qede/base/ecore_sriov.c  |  28 +++++---
 drivers/net/qede/qede_eth_if.c       |  47 ++++++-------
 drivers/net/qede/qede_eth_if.h       |  11 ++-
 drivers/net/qede/qede_rxtx.c         |  27 +++++---
 7 files changed, 155 insertions(+), 184 deletions(-)

diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 83a62e0..74f61b0 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -548,12 +548,7 @@ enum _ecore_status_t
 ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
 			      u16 opaque_fid,
 			      u32 cid,
-			      u16 rx_queue_id,
-			      u8 vf_rx_queue_id,
-			      u8 vport_id,
-			      u8 stats_id,
-			      u16 sb,
-			      u8 sb_index,
+			      struct ecore_queue_start_common_params *p_params,
 			      u16 bd_max_bytes,
 			      dma_addr_t bd_chain_phys_addr,
 			      dma_addr_t cqe_pbl_addr,
@@ -568,22 +563,23 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Store information for the stop */
-	p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
 	p_rx_cid->cid = cid;
 	p_rx_cid->opaque_fid = opaque_fid;
-	p_rx_cid->vport_id = vport_id;
+	p_rx_cid->vport_id = p_params->vport_id;
 
-	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		   opaque_fid, cid, rx_queue_id, vport_id, sb);
+		   opaque_fid, cid, p_params->queue_id,
+		   p_params->vport_id, p_params->sb);
 
 	/* Get SPQ entry */
 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -599,10 +595,10 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
 
 	p_ramrod = &p_ent->ramrod.rx_queue_start;
 
-	p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
-	p_ramrod->sb_index = sb_index;
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+	p_ramrod->sb_index = (u8)p_params->sb_idx;
 	p_ramrod->vport_id = abs_vport_id;
-	p_ramrod->stats_counter_id = stats_id;
+	p_ramrod->stats_counter_id = p_params->stats_id;
 	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
 	p_ramrod->complete_cqe_flg = 0;
 	p_ramrod->complete_event_flg = 1;
@@ -613,30 +609,27 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-	if (vf_rx_queue_id || b_use_zone_a_prod) {
-		p_ramrod->vf_rx_prod_index = vf_rx_queue_id;
+	if (p_params->vf_qid || b_use_zone_a_prod) {
+		p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 			   "Queue%s is meant for VF rxq[%02x]\n",
 			   b_use_zone_a_prod ? " [legacy]" : "",
-			   vf_rx_queue_id);
+			   p_params->vf_qid);
 		p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
 	}
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-						 u16 opaque_fid,
-						 u8 rx_queue_id,
-						 u8 vport_id,
-						 u8 stats_id,
-						 u16 sb,
-						 u8 sb_index,
-						 u16 bd_max_bytes,
-						 dma_addr_t bd_chain_phys_addr,
-						 dma_addr_t cqe_pbl_addr,
-						 u16 cqe_pbl_size,
-						 void OSAL_IOMEM **pp_prod)
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    struct ecore_queue_start_common_params *p_params,
+			    u16 bd_max_bytes,
+			    dma_addr_t bd_chain_phys_addr,
+			    dma_addr_t cqe_pbl_addr,
+			    u16 cqe_pbl_size,
+			    void OSAL_IOMEM * *pp_prod)
 {
 	struct ecore_hw_cid_data *p_rx_cid;
 	u32 init_prod_val = 0;
@@ -646,20 +639,20 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 
 	if (IS_VF(p_hwfn->p_dev)) {
 		return ecore_vf_pf_rxq_start(p_hwfn,
-					     rx_queue_id,
-					     sb,
-					     sb_index,
+					     p_params->queue_id,
+					     p_params->sb,
+					     (u8)p_params->sb_idx,
 					     bd_max_bytes,
 					     bd_chain_phys_addr,
 					     cqe_pbl_addr,
 					     cqe_pbl_size, pp_prod);
 	}
 
-	rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
@@ -672,7 +665,7 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 			  (u32 *)(&init_prod_val));
 
 	/* Allocate a CID for the queue */
-	p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
 	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
 				   &p_rx_cid->cid);
 	if (rc != ECORE_SUCCESS) {
@@ -680,16 +673,13 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 		return rc;
 	}
 	p_rx_cid->b_cid_allocated = true;
+	p_params->stats_id = abs_stats_id;
+	p_params->vf_qid = 0;
 
 	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
 					   opaque_fid,
 					   p_rx_cid->cid,
-					   rx_queue_id,
-					   0,
-					   vport_id,
-					   abs_stats_id,
-					   sb,
-					   sb_index,
+					   p_params,
 					   bd_max_bytes,
 					   bd_chain_phys_addr,
 					   cqe_pbl_addr,
@@ -816,12 +806,8 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t
 ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 			      u16 opaque_fid,
-			      u16 tx_queue_id,
 			      u32 cid,
-			      u8 vport_id,
-			      u8 stats_id,
-			      u16 sb,
-			      u8 sb_index,
+			      struct ecore_queue_start_common_params *p_params,
 			      dma_addr_t pbl_addr,
 			      u16 pbl_size,
 			      union ecore_qm_pq_params *p_pq_params)
@@ -835,15 +821,15 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
 	/* Store information for the stop */
-	p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
 	p_tx_cid->cid = cid;
 	p_tx_cid->opaque_fid = opaque_fid;
 
-	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
@@ -862,9 +848,9 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	p_ramrod = &p_ent->ramrod.tx_queue_start;
 	p_ramrod->vport_id = abs_vport_id;
 
-	p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
-	p_ramrod->sb_index = sb_index;
-	p_ramrod->stats_counter_id = stats_id;
+	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+	p_ramrod->sb_index = (u8)p_params->sb_idx;
+	p_ramrod->stats_counter_id = p_params->stats_id;
 
 	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
 
@@ -877,17 +863,14 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-						 u16 opaque_fid,
-						 u16 tx_queue_id,
-						 u8 vport_id,
-						 u8 stats_id,
-						 u16 sb,
-						 u8 sb_index,
-						 u8 tc,
-						 dma_addr_t pbl_addr,
-						 u16 pbl_size,
-						 void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    struct ecore_queue_start_common_params *p_params,
+			    u8 tc,
+			    dma_addr_t pbl_addr,
+			    u16 pbl_size,
+			    void OSAL_IOMEM * *pp_doorbell)
 {
 	struct ecore_hw_cid_data *p_tx_cid;
 	union ecore_qm_pq_params pq_params;
@@ -896,19 +879,19 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 
 	if (IS_VF(p_hwfn->p_dev)) {
 		return ecore_vf_pf_txq_start(p_hwfn,
-					     tx_queue_id,
-					     sb,
-					     sb_index,
+					     p_params->queue_id,
+					     p_params->sb,
+					     (u8)p_params->sb_idx,
 					     pbl_addr,
 					     pbl_size,
 					     pp_doorbell);
 	}
 
-	rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
-	p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
 	OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
 	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
 
@@ -924,18 +907,16 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		    opaque_fid, p_tx_cid->cid, tx_queue_id,
-		    vport_id, sb);
+		    opaque_fid, p_tx_cid->cid, p_params->queue_id,
+		    p_params->vport_id, p_params->sb);
+
+	p_params->stats_id = abs_stats_id;
 
 	/* TODO - set tc in the pq_params for multi-cos */
 	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
 					   opaque_fid,
-					   tx_queue_id,
 					   p_tx_cid->cid,
-					   vport_id,
-					   abs_stats_id,
-					   sb,
-					   sb_index,
+					   p_params,
 					   pbl_addr,
 					   pbl_size,
 					   &pq_params);
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index c8419a3..9c1bd38 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -40,11 +40,8 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn
  * @param opaque_fid
  * @param cid
- * @param rx_queue_id
- * @param vport_id
- * @param stats_id
- * @param sb
- * @param sb_index
+ * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
+	  stats_id is absolute packed in p_params.
  * @param bd_max_bytes
  * @param bd_chain_phys_addr
  * @param cqe_pbl_addr
@@ -57,12 +54,7 @@ enum _ecore_status_t
 ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn	*p_hwfn,
 			      u16 opaque_fid,
 			      u32 cid,
-			      u16 rx_queue_id,
-			      u8 vf_rx_queue_id,
-			      u8 vport_id,
-			      u8 stats_id,
-			      u16 sb,
-			      u8 sb_index,
+			      struct ecore_queue_start_common_params *p_params,
 			      u16 bd_max_bytes,
 			      dma_addr_t bd_chain_phys_addr,
 			      dma_addr_t cqe_pbl_addr,
@@ -74,12 +66,8 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn	*p_hwfn,
  *
  * @param p_hwfn
  * @param opaque_fid
- * @param tx_queue_id
  * @param cid
- * @param vport_id
- * @param stats_id
- * @param sb
- * @param sb_index
+ * @param p_params [queue_id, vport_id,stats_id, sb, sb_idx, vf_qid]
  * @param pbl_addr
  * @param pbl_size
  * @param p_pq_params - parameters for choosing the PQ for this Tx queue
@@ -89,12 +77,8 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn	*p_hwfn,
 enum _ecore_status_t
 ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn	*p_hwfn,
 			      u16 opaque_fid,
-			      u16 tx_queue_id,
 			      u32 cid,
-			      u8 vport_id,
-			      u8 stats_id,
-			      u16 sb,
-			      u8 sb_index,
+			      struct ecore_queue_start_common_params *p_params,
 			      dma_addr_t pbl_addr,
 			      u16 pbl_size,
 			      union ecore_qm_pq_params *p_pq_params);
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 447d1fb..326fa45 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -27,6 +27,18 @@ enum ecore_rss_caps {
 #define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
 #endif
 
+struct ecore_queue_start_common_params {
+	/* Rx/Tx queue id */
+	u8 queue_id;
+	u8 vport_id;
+
+	/* stats_id is relative or absolute depends on function */
+	u8 stats_id;
+	u16 sb;
+	u16 sb_idx;
+	u16 vf_qid;
+};
+
 struct ecore_rss_params {
 	u8 update_rss_config;
 	u8 rss_enable;
@@ -154,14 +166,7 @@ ecore_filter_accept_cmd(
  *
  * @param p_hwfn
  * @param opaque_fid
- * @param rx_queue_id		RX Queue ID: Zero based, per VPort, allocated
- *				by assignment (=rssId)
- * @param vport_id		VPort ID
- * @param u8 stats_id		 VPort ID which the queue stats
- *				will be added to
- * @param sb			Status Block of the Function Event Ring
- * @param sb_index		Index into the status block of the
- *				Function Event Ring
+ * @p_params			[stats_id is relative, packed in p_params]
  * @param bd_max_bytes		Maximum bytes that can be placed on a BD
  * @param bd_chain_phys_addr	Physical address of BDs for receive.
  * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
@@ -172,18 +177,15 @@ ecore_filter_accept_cmd(
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-						 u16 opaque_fid,
-						 u8 rx_queue_id,
-						 u8 vport_id,
-						 u8 stats_id,
-						 u16 sb,
-						 u8 sb_index,
-						 u16 bd_max_bytes,
-						 dma_addr_t bd_chain_phys_addr,
-						 dma_addr_t cqe_pbl_addr,
-						 u16 cqe_pbl_size,
-						 void OSAL_IOMEM **pp_prod);
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    struct ecore_queue_start_common_params *p_params,
+			    u16 bd_max_bytes,
+			    dma_addr_t bd_chain_phys_addr,
+			    dma_addr_t cqe_pbl_addr,
+			    u16 cqe_pbl_size,
+			    void OSAL_IOMEM * *pp_prod);
 
 /**
  * @brief ecore_sp_eth_rx_queue_stop -
@@ -216,13 +218,7 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
  *
  * @param p_hwfn
  * @param opaque_fid
- * @param tx_queue_id		TX Queue ID
- * @param vport_id		VPort ID
- * @param u8 stats_id		 VPort ID which the queue stats
- *				will be added to
- * @param sb			Status Block of the Function Event Ring
- * @param sb_index		Index into the status block of the Function
- *				Event Ring
+ * @p_params
  * @param tc			traffic class to use with this L2 txq
  * @param pbl_addr		address of the pbl array
  * @param pbl_size		number of entries in pbl
@@ -232,17 +228,14 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-						 u16 opaque_fid,
-						 u16 tx_queue_id,
-						 u8 vport_id,
-						 u8 stats_id,
-						 u16 sb,
-						 u8 sb_index,
-						 u8 tc,
-						 dma_addr_t pbl_addr,
-						 u16 pbl_size,
-						 void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    struct ecore_queue_start_common_params *p_params,
+			    u8 tc,
+			    dma_addr_t pbl_addr,
+			    u16 pbl_size,
+			    void OSAL_IOMEM * *pp_doorbell);
 
 /**
  * @brief ecore_sp_eth_tx_queue_stop -
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index eb3a1e2..b28d728 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1961,6 +1961,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 				       struct ecore_ptt *p_ptt,
 				       struct ecore_vf_info *vf)
 {
+	struct ecore_queue_start_common_params p_params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
 	struct vfpf_start_rxq_tlv *req;
@@ -1968,6 +1969,13 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	enum _ecore_status_t rc;
 
 	req = &mbx->req_virt->start_rxq;
+	OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+	p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
+	p_params.vf_qid = req->rx_qid;
+	p_params.vport_id = vf->vport_id;
+	p_params.stats_id = vf->abs_vf_id + 0x10,
+	p_params.sb = req->hw_sb;
+	p_params.sb_idx = req->sb_index;
 
 	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
@@ -1987,12 +1995,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 
 	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
 					   vf->vf_queues[req->rx_qid].fw_cid,
-					   vf->vf_queues[req->rx_qid].fw_rx_qid,
-					   (u8)req->rx_qid,
-					   vf->vport_id,
-					   vf->abs_vf_id + 0x10,
-					   req->hw_sb,
-					   req->sb_index,
+					   &p_params,
 					   req->bd_max_bytes,
 					   req->rxq_addr,
 					   req->cqe_pbl_addr,
@@ -2057,6 +2060,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 				       struct ecore_ptt *p_ptt,
 				       struct ecore_vf_info *vf)
 {
+	struct ecore_queue_start_common_params p_params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
 	union ecore_qm_pq_params pq_params;
@@ -2069,6 +2073,12 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	pq_params.eth.vf_id = vf->relative_vf_id;
 
 	req = &mbx->req_virt->start_txq;
+	OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+	p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
+	p_params.vport_id = vf->vport_id;
+	p_params.stats_id = vf->abs_vf_id + 0x10,
+	p_params.sb = req->hw_sb;
+	p_params.sb_idx = req->sb_index;
 
 	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
@@ -2077,12 +2087,8 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	rc = ecore_sp_eth_txq_start_ramrod(
 		p_hwfn,
 		vf->opaque_fid,
-		vf->vf_queues[req->tx_qid].fw_tx_qid,
 		vf->vf_queues[req->tx_qid].fw_cid,
-		vf->vport_id,
-		vf->abs_vf_id + 0x10,
-		req->hw_sb,
-		req->sb_index,
+		&p_params,
 		req->pbl_addr,
 		req->pbl_size,
 		&pq_params);
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index a19b22e..1ae6127 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -168,9 +168,9 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
 
 static int
 qed_start_rxq(struct ecore_dev *edev,
-	      uint8_t rss_id, uint8_t rx_queue_id,
-	      uint8_t vport_id, uint16_t sb,
-	      uint8_t sb_index, uint16_t bd_max_bytes,
+	      uint8_t rss_num,
+	      struct ecore_queue_start_common_params *p_params,
+	      uint16_t bd_max_bytes,
 	      dma_addr_t bd_chain_phys_addr,
 	      dma_addr_t cqe_pbl_addr,
 	      uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
@@ -178,28 +178,28 @@ qed_start_rxq(struct ecore_dev *edev,
 	struct ecore_hwfn *p_hwfn;
 	int rc, hwfn_index;
 
-	hwfn_index = rss_id % edev->num_hwfns;
+	hwfn_index = rss_num % edev->num_hwfns;
 	p_hwfn = &edev->hwfns[hwfn_index];
 
+	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+	p_params->stats_id = p_params->vport_id;
+
 	rc = ecore_sp_eth_rx_queue_start(p_hwfn,
 					 p_hwfn->hw_info.opaque_fid,
-					 rx_queue_id / edev->num_hwfns,
-					 vport_id,
-					 vport_id,
-					 sb,
-					 sb_index,
+					 p_params,
 					 bd_max_bytes,
 					 bd_chain_phys_addr,
 					 cqe_pbl_addr, cqe_pbl_size, pp_prod);
 
 	if (rc) {
-		DP_ERR(edev, "Failed to start RXQ#%d\n", rx_queue_id);
+		DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
 		return rc;
 	}
 
 	DP_VERBOSE(edev, ECORE_MSG_SPQ,
-		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
-		   rx_queue_id, rss_id, vport_id, sb);
+		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+		   p_params->queue_id, rss_num, p_params->vport_id,
+		   p_params->sb);
 
 	return 0;
 }
@@ -226,36 +226,35 @@ qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
 
 static int
 qed_start_txq(struct ecore_dev *edev,
-	      uint8_t rss_id, uint16_t tx_queue_id,
-	      uint8_t vport_id, uint16_t sb,
-	      uint8_t sb_index,
+	      uint8_t rss_num,
+	      struct ecore_queue_start_common_params *p_params,
 	      dma_addr_t pbl_addr,
 	      uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
 {
 	struct ecore_hwfn *p_hwfn;
 	int rc, hwfn_index;
 
-	hwfn_index = rss_id % edev->num_hwfns;
+	hwfn_index = rss_num % edev->num_hwfns;
 	p_hwfn = &edev->hwfns[hwfn_index];
 
+	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+	p_params->stats_id = p_params->vport_id;
+
 	rc = ecore_sp_eth_tx_queue_start(p_hwfn,
 					 p_hwfn->hw_info.opaque_fid,
-					 tx_queue_id / edev->num_hwfns,
-					 vport_id,
-					 vport_id,
-					 sb,
-					 sb_index,
+					 p_params,
 					 0 /* tc */,
 					 pbl_addr, pbl_size, pp_doorbell);
 
 	if (rc) {
-		DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id);
+		DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
 		return rc;
 	}
 
 	DP_VERBOSE(edev, ECORE_MSG_SPQ,
-		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
-		   tx_queue_id, rss_id, vport_id, sb);
+		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+		   p_params->queue_id, rss_num, p_params->vport_id,
+		   p_params->sb);
 
 	return 0;
 }
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index 5a7fdc9..33655c3 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -133,9 +133,9 @@ struct qed_eth_ops {
 			    struct qed_update_vport_params *params);
 
 	int (*q_rx_start)(struct ecore_dev *cdev,
-			  uint8_t rss_id, uint8_t rx_queue_id,
-			  uint8_t vport_id, uint16_t sb,
-			  uint8_t sb_index, uint16_t bd_max_bytes,
+			  uint8_t rss_num,
+			  struct ecore_queue_start_common_params *p_params,
+			  uint16_t bd_max_bytes,
 			  dma_addr_t bd_chain_phys_addr,
 			  dma_addr_t cqe_pbl_addr,
 			  uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
@@ -144,9 +144,8 @@ struct qed_eth_ops {
 			 struct qed_stop_rxq_params *params);
 
 	int (*q_tx_start)(struct ecore_dev *edev,
-			  uint8_t rss_id, uint16_t tx_queue_id,
-			  uint8_t vport_id, uint16_t sb,
-			  uint8_t sb_index,
+			  uint8_t rss_num,
+			  struct ecore_queue_start_common_params *p_params,
 			  dma_addr_t pbl_addr,
 			  uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
 
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 54cd849..8f83497 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -572,6 +572,7 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
+	struct ecore_queue_start_common_params q_params;
 	struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
 	struct qed_dev_info *qed_info = &qdev->dev_info.common;
 	struct qed_update_vport_params vport_update_params;
@@ -591,12 +592,15 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 			page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
 								rx_comp_ring);
 
+			memset(&q_params, 0, sizeof(q_params));
+			q_params.queue_id = i;
+			q_params.vport_id = 0;
+			q_params.sb = fp->sb_info->igu_sb_id;
+			q_params.sb_idx = RX_PI;
+
 			ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
 
-			rc = qdev->ops->q_rx_start(edev, i, fp->rxq->queue_id,
-					   0,
-					   fp->sb_info->igu_sb_id,
-					   RX_PI,
+			rc = qdev->ops->q_rx_start(edev, i, &q_params,
 					   fp->rxq->rx_buf_size,
 					   fp->rxq->rx_bd_ring.p_phys_addr,
 					   p_phys_table,
@@ -622,11 +626,16 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
 
 			p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
 			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
-			rc = qdev->ops->q_tx_start(edev, i, txq->queue_id,
-						   0,
-						   fp->sb_info->igu_sb_id,
-						   TX_PI(tc),
-						   p_phys_table, page_cnt,
+
+			memset(&q_params, 0, sizeof(q_params));
+			q_params.queue_id = txq->queue_id;
+			q_params.vport_id = 0;
+			q_params.sb = fp->sb_info->igu_sb_id;
+			q_params.sb_idx = TX_PI(tc);
+
+			rc = qdev->ops->q_tx_start(edev, i, &q_params,
+						   p_phys_table,
+						   page_cnt, /* **pp_doorbell */
 						   &txq->doorbell_addr);
 			if (rc) {
 				DP_ERR(edev, "Start txq %u failed %d\n",
-- 
1.8.3.1

Thread overview: 59+ messages
2016-10-19  4:11 [dpdk-dev] [PATCH v4 00/32] net/qede: update qede pmd to 1.2.0.1 and enable by default Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 01/32] net/qede/base: add new init files and rearrange the code Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 02/32] net/qede/base: formatting changes Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 03/32] net/qede: use FW CONFIG defines as needed Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 04/32] net/qede/base: add HSI changes and register defines Rasesh Mody
2016-10-19 12:37   ` Ferruh Yigit
2016-10-19 13:46     ` Mody, Rasesh
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 05/32] net/qede/base: add attention formatting string Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 06/32] net/qede/base: additional formatting/comment changes Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 07/32] net/qede: fix 32 bit compilation Rasesh Mody
2016-10-26 16:54   ` Thomas Monjalon
2016-10-26 21:01     ` Mody, Rasesh
2016-10-26 21:40       ` Thomas Monjalon
2016-10-28  6:37         ` [dpdk-dev] [PATCH] net/qede: fix gcc compiler option checks Rasesh Mody
2016-10-28 22:12           ` Stephen Hemminger
2016-10-28 22:49             ` Mody, Rasesh
2016-11-07 19:54               ` Thomas Monjalon
2016-11-07 20:10           ` Thomas Monjalon
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 08/32] net/qede: change signature of MCP command API Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 09/32] net/qede: serialize access to MFW mbox Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 10/32] net/qede: add NIC selftest and query sensor info support Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 11/32] net/qede/base: update base driver Rasesh Mody
2021-03-24 14:07   ` Ferruh Yigit
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 12/32] net/qede/base: rename structure and defines Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 13/32] net/qede/base: comment enhancements Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 14/32] net/qede/base: add MFW crash dump support Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 15/32] net/qede: enable support for unequal number of Rx/Tx queues Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 16/32] net/qede: fix port (re)configuration issue Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 17/32] net/qede/base: allow MTU change via vport-update Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 18/32] net/qede: add missing 100G link speed capability Rasesh Mody
2016-10-26 15:41   ` Thomas Monjalon
2016-10-26 15:54     ` Bruce Richardson
2016-10-26 21:28     ` Harish Patil
2016-10-26 21:43       ` Thomas Monjalon
2016-10-28  6:42         ` [dpdk-dev] [PATCH] net/qede: fix advertising " Rasesh Mody
2016-10-28  7:26           ` Thomas Monjalon
2016-10-29  1:11             ` Harish Patil
2016-10-29  6:14             ` [dpdk-dev] [PATCH v2] " Rasesh Mody
2016-10-31 18:35               ` [dpdk-dev] [PATCH v3] " Rasesh Mody
2016-10-31 18:35                 ` Rasesh Mody
2016-11-07 19:48                   ` Thomas Monjalon
2016-11-10  2:54                     ` Harish Patil
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 19/32] net/qede: remove unused/dead code Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 20/32] net/qede: fixes for VLAN filters Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 21/32] net/qede: add enable/disable VLAN filtering Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 22/32] net/qede: fix RSS related issues Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 23/32] net/qede: add scatter gather support Rasesh Mody
2016-10-19  4:11 ` Rasesh Mody [this message]
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 25/32] net/qede/base: add support to initiate PF FLR Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 26/32] net/qede: skip slowpath polling for 100G VF device Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 27/32] net/qede: fix driver version string Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 28/32] net/qede: fix status block index for VF queues Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 29/32] net/qede: add support for queue statistics Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 30/32] net/qede: remove zlib dependency and enable PMD by default Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 31/32] doc: update qede pmd documentation Rasesh Mody
2016-10-19  4:11 ` [dpdk-dev] [PATCH v4 32/32] net/qede: update driver version Rasesh Mody
2016-10-24 13:41 ` [dpdk-dev] [PATCH v4 00/32] net/qede: update qede pmd to 1.2.0.1 and enable by default Bruce Richardson
2016-10-26 15:20   ` Thomas Monjalon
2016-10-26 17:01     ` Mody, Rasesh
