From: Lance Richardson <lance.richardson@broadcom.com>
To: dev@dpdk.org
Cc: ajit.khaparde@broadcom.com, ferruh.yigit@intel.com,
	Lance Richardson <lance.richardson@broadcom.com>
Subject: [dpdk-dev] [PATCH 11/11] net/bnxt: enable RSS for thor-based controllers
Date: Sun,  2 Jun 2019 13:42:47 -0400
Message-ID: <20190602174247.32368-13-lance.richardson@broadcom.com>
In-Reply-To: <20190602174247.32368-1-lance.richardson@broadcom.com>

Make the changes needed to support RSS for Thor-based controllers:
size the RSS redirection table and the number of RSS contexts from
the rx ring count, and populate the table with rx/completion ring ID
pairs rather than ring group IDs.

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
---
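A note on the sizing logic: on Thor the RSS redirection table is
carved into fixed-size hardware contexts, so both the context count
and the table size are derived from the rx ring count by rounding up.
Below is a minimal standalone sketch of that arithmetic; the value of
64 entries per context mirrors the 64-iteration inner loop in
bnxt_vnic_rss_configure_thor(), and all names here are illustrative
stand-ins rather than the driver's identifiers:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for BNXT_RSS_ENTRIES_PER_CTX_THOR. */
#define ENTRIES_PER_CTX 64

/* One RSS context covers up to 64 rx rings; round up. */
static uint16_t rss_ctxts(uint16_t rx_nr_rings)
{
	return (rx_nr_rings + ENTRIES_PER_CTX - 1) / ENTRIES_PER_CTX;
}

/* The redirection table is always a whole number of contexts. */
static uint16_t rss_hash_tbl_size(uint16_t rx_nr_rings)
{
	return rss_ctxts(rx_nr_rings) * ENTRIES_PER_CTX;
}

int main(void)
{
	/* 5 rings -> 1 ctx, 64 entries; 65 rings -> 2 ctxs, 128 entries. */
	printf("%u %u\n", (unsigned)rss_ctxts(5), (unsigned)rss_hash_tbl_size(5));
	printf("%u %u\n", (unsigned)rss_ctxts(65), (unsigned)rss_hash_tbl_size(65));
	return 0;
}
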
 drivers/net/bnxt/bnxt_ethdev.c |  70 ++++++++++--
 drivers/net/bnxt/bnxt_hwrm.c   | 192 +++++++++++++++++++++++++++------
 drivers/net/bnxt/bnxt_hwrm.h   |   6 +-
 drivers/net/bnxt/bnxt_vnic.c   |  15 ++-
 drivers/net/bnxt/bnxt_vnic.h   |   1 +
 5 files changed, 236 insertions(+), 48 deletions(-)
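
Also note the Thor table layout relied on by the reta update/query
hunks below: each logical RETA slot expands to two 16-bit entries, the
rx ring ID followed by its completion ring ID, which is why those
paths index the table with i * 2. A sketch of that layout, with
hypothetical ring-ID arrays standing in for the driver's ring
structures:

#include <stdint.h>
#include <stdio.h>

/* Fill a Thor-style RSS table: slot i holds the rx ring ID at
 * tbl[2 * i] and the matching completion ring ID at tbl[2 * i + 1].
 */
static void fill_thor_rss_table(uint16_t *tbl, int nslots,
				const uint16_t *rx_ring_ids,
				const uint16_t *cp_ring_ids)
{
	int i;

	for (i = 0; i < nslots; i++) {
		tbl[i * 2] = rx_ring_ids[i];		/* rx ring ID */
		tbl[i * 2 + 1] = cp_ring_ids[i];	/* completion ring ID */
	}
}

int main(void)
{
	const uint16_t rx_ids[2] = { 10, 11 };	/* hypothetical fw ring IDs */
	const uint16_t cp_ids[2] = { 20, 21 };
	uint16_t tbl[4];

	fill_thor_rss_table(tbl, 2, rx_ids, cp_ids);
	/* Querying slot i reads back tbl[i * 2], the rx ring ID, which
	 * matches the BNXT_CHIP_THOR() branch in bnxt_reta_query_op().
	 */
	printf("slot 1 -> rx ring %u\n", (unsigned)tbl[1 * 2]);
	return 0;
}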

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index d26066062..749763c25 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -171,6 +171,24 @@ static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
  * High level utility functions
  */
 
+static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+{
+	if (!BNXT_CHIP_THOR(bp))
+		return 1;
+
+	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
+				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
+				    BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
+static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
+{
+	if (!BNXT_CHIP_THOR(bp))
+		return HW_HASH_INDEX_SIZE;
+
+	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
 static void bnxt_free_mem(struct bnxt *bp)
 {
 	bnxt_free_filter_mem(bp);
@@ -290,13 +308,21 @@ static int bnxt_init_chip(struct bnxt *bp)
 
 		/* Alloc RSS context only if RSS mode is enabled */
 		if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
-			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+			int j, nr_ctxs = bnxt_rss_ctxts(bp);
+
+			rc = 0;
+			for (j = 0; j < nr_ctxs; j++) {
+				rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
+				if (rc)
+					break;
+			}
 			if (rc) {
 				PMD_DRV_LOG(ERR,
-					"HWRM vnic %d ctx alloc failure rc: %x\n",
-					i, rc);
+				  "HWRM vnic %d ctx %d alloc failure rc: %x\n",
+				  i, j, rc);
 				goto err_out;
 			}
+			vnic->num_lb_ctxts = nr_ctxs;
 		}
 
 		/*
@@ -470,7 +496,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
 	dev_info->max_rx_queues = max_rx_rings;
 	dev_info->max_tx_queues = max_rx_rings;
-	dev_info->reta_size = HW_HASH_INDEX_SIZE;
+	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
 	dev_info->hash_key_size = 40;
 	max_vnics = bp->max_vnics;
 
@@ -1004,11 +1030,20 @@ static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
 /* Return rxq corresponding to a given rss table ring/group ID. */
 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
 {
+	struct bnxt_rx_queue *rxq;
 	unsigned int i;
 
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		if (bp->grp_info[i].fw_grp_id == fwr)
-			return i;
+	if (!BNXT_HAS_RING_GRPS(bp)) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			rxq = bp->eth_dev->data->rx_queues[i];
+			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
+				return rxq->index;
+		}
+	} else {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			if (bp->grp_info[i].fw_grp_id == fwr)
+				return i;
+		}
 	}
 
 	return INVALID_HW_RING_ID;
@@ -1021,7 +1056,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-	uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
 	uint16_t idx, sft;
 	int i;
 
@@ -1053,6 +1088,13 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 			return -EINVAL;
 		}
 
+		if (BNXT_CHIP_THOR(bp)) {
+			vnic->rss_table[i * 2] =
+				rxq->rx_ring->rx_ring_struct->fw_ring_id;
+			vnic->rss_table[i * 2 + 1] =
+				rxq->cp_ring->cp_ring_struct->fw_ring_id;
+		} else {
+			vnic->rss_table[i] =
+			    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
+		}
-		vnic->rss_table[i] =
-		    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
 	}
@@ -1067,7 +1112,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-	uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
 	uint16_t idx, sft, i;
 
 	/* Retrieve from the default VNIC */
@@ -1090,7 +1135,11 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
 
-			qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
+			if (BNXT_CHIP_THOR(bp))
+				qid = bnxt_rss_to_qid(bp,
+						      vnic->rss_table[i * 2]);
+			else
+				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
 
 			if (qid == INVALID_HW_RING_ID) {
 				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index f5b7e4593..a4c879b35 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1638,9 +1638,11 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
 	int rc = 0;
+	uint16_t ctx_id;
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
 						bp->hwrm_cmd_resp_addr;
@@ -1648,38 +1650,40 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-
 	HWRM_CHECK_RESULT();
 
-	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	if (!BNXT_HAS_RING_GRPS(bp))
+		vnic->fw_grp_ids[ctx_idx] = ctx_id;
+	else if (ctx_idx == 0)
+		vnic->rss_rule = ctx_id;
+
 	HWRM_UNLOCK();
-	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 
 	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+			    struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
 	int rc = 0;
 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
 						bp->hwrm_cmd_resp_addr;
 
-	if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
+	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
 		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 		return rc;
 	}
 	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
 
-	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
-
 	return rc;
 }
 
@@ -1711,6 +1715,47 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return rc;
 }
 
+static int
+bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int i;
+	int rc = 0;
+	int nr_ctxs = bp->max_ring_grps;
+	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (!(vnic->rss_table && vnic->hash_type))
+		return 0;
+
+	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+	req.hash_mode_flags = vnic->hash_mode;
+
+	req.hash_key_tbl_addr =
+	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+	for (i = 0; i < nr_ctxs; i++) {
+		req.ring_grp_tbl_addr =
+			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+					 i * HW_HASH_INDEX_SIZE);
+		req.ring_table_pair_index = i;
+		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+
+		HWRM_CHECK_RESULT();
+		if (rc)
+			break;
+	}
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 			   struct bnxt_vnic_info *vnic)
 {
@@ -1718,6 +1763,9 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+	if (BNXT_CHIP_THOR(bp))
+		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+
 	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -2247,7 +2295,7 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-	int i;
+	int i, j;
 
 	if (bp->vnic_info == NULL)
 		return;
@@ -2263,7 +2311,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 
 		bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
-		bnxt_hwrm_vnic_ctx_free(bp, vnic);
+		if (BNXT_CHIP_THOR(bp)) {
+			for (j = 0; j < vnic->num_lb_ctxts; j++) {
+				bnxt_hwrm_vnic_ctx_free(bp, vnic,
+							vnic->fw_grp_ids[j]);
+				vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+			}
+		} else {
+			bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+			vnic->rss_rule = INVALID_HW_RING_ID;
+		}
 
 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 
@@ -4037,32 +4094,107 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
 	return 0;
 }
 
-int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
-	unsigned int rss_idx, fw_idx, i;
+	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+	int nr_ctxs = bp->max_ring_grps;
+	struct bnxt_rx_queue **rxqs = bp->rx_queues;
+	uint16_t *ring_tbl = vnic->rss_table;
+	int max_rings = bp->rx_nr_rings;
+	int i, j, k, cnt;
+	int rc = 0;
 
-	if (vnic->rss_table && vnic->hash_type) {
-		/*
-		 * Fill the RSS hash & redirection table with
-		 * ring group ids for all VNICs
-		 */
-		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
-			rss_idx++, fw_idx++) {
-			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-				fw_idx %= bp->rx_cp_nr_rings;
-				if (vnic->fw_grp_ids[fw_idx] !=
-				    INVALID_HW_RING_ID)
+	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+	req.hash_mode_flags = vnic->hash_mode;
+
+	req.ring_grp_tbl_addr =
+	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+	req.hash_key_tbl_addr =
+	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+	for (i = 0, k = 0; i < nr_ctxs; i++) {
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_cp_ring_info *cpr;
+
+		req.ring_table_pair_index = i;
+		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+		for (j = 0; j < 64; j++) {
+			uint16_t ring_id;
+
+			/* Find next active ring. */
+			for (cnt = 0; cnt < max_rings; cnt++) {
+				if (rx_queue_state[k] !=
+						RTE_ETH_QUEUE_STATE_STOPPED)
 					break;
-				fw_idx++;
+				if (++k == max_rings)
+					k = 0;
 			}
-			if (i == bp->rx_cp_nr_rings)
+
+			/* Return if no rings are active. */
+			if (cnt == max_rings) {
+				HWRM_UNLOCK();
+				return 0;
+			}
-			vnic->rss_table[rss_idx] =
-				vnic->fw_grp_ids[fw_idx];
+
+			/* Add rx/cp ring pair to RSS table. */
+			rxr = rxqs[k]->rx_ring;
+			cpr = rxqs[k]->cp_ring;
+
+			ring_id = rxr->rx_ring_struct->fw_ring_id;
+			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
+			ring_id = cpr->cp_ring_struct->fw_ring_id;
+			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
+
+			if (++k == max_rings)
+				k = 0;
 		}
-		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+
+		HWRM_CHECK_RESULT();
+		if (rc)
+			break;
 	}
-	return 0;
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	unsigned int rss_idx, fw_idx, i;
+
+	if (!(vnic->rss_table && vnic->hash_type))
+		return 0;
+
+	if (BNXT_CHIP_THOR(bp))
+		return bnxt_vnic_rss_configure_thor(bp, vnic);
+
+	/*
+	 * Fill the RSS hash & redirection table with
+	 * ring group ids for all VNICs
+	 */
+	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+		rss_idx++, fw_idx++) {
+		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+			fw_idx %= bp->rx_cp_nr_rings;
+			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
+				break;
+			fw_idx++;
+		}
+		if (i == bp->rx_cp_nr_rings)
+			return 0;
+		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+	}
+	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index ffd99de34..f286bcd8b 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -101,8 +101,10 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 				int16_t fw_vf_id);
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			     uint16_t ctx_idx);
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			    uint16_t ctx_idx);
 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 			   struct bnxt_vnic_info *vnic);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 2cf5f0b5b..262cfc18d 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -113,14 +113,21 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 	struct rte_pci_device *pdev = bp->pdev;
 	const struct rte_memzone *mz;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
-				HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
-				HW_HASH_KEY_SIZE +
-				BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN);
+	uint32_t entry_length;
 	uint16_t max_vnics;
 	int i;
 	rte_iova_t mz_phys_addr;
 
+	entry_length = HW_HASH_KEY_SIZE +
+		       BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN;
+
+	if (BNXT_CHIP_THOR(bp))
+		entry_length += BNXT_RSS_TBL_SIZE_THOR *
+				2 * sizeof(*vnic->rss_table);
+	else
+		entry_length += HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+	entry_length = RTE_CACHE_LINE_ROUNDUP(entry_length);
+
 	max_vnics = bp->max_vnics;
 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
 		 "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 9029f78c3..16a0d5763 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -18,6 +18,7 @@ struct bnxt_vnic_info {
 	uint16_t	start_grp_id;
 	uint16_t	end_grp_id;
 	uint16_t	*fw_grp_ids;
+	uint16_t	num_lb_ctxts;
 	uint16_t	dflt_ring_grp;
 	uint16_t	mru;
 	uint16_t	hash_type;
-- 
2.17.1

