* [dpdk-stable] [PATCH 2/5] net/qede: fix minimum buffer size and scatter Rx check
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
@ 2016-12-31 8:16 ` Rasesh Mody
2016-12-31 8:16 ` [dpdk-stable] [PATCH 3/5] net/qede: fix PF fastpath status block index Rasesh Mody
` (7 subsequent siblings)
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2016-12-31 8:16 UTC (permalink / raw)
To: dev; +Cc: Harish Patil, stable, Dept-EngDPDKDev
From: Harish Patil <harish.patil@qlogic.com>
- Fix minimum RX buffer size to 1024B
- Force enable scatter/gather mode if given RX buf size is less than MTU
- Adjust RX buffer size to cache-line size with overhead included
Fixes: bec0228816c0 ("net/qede: support scatter gather")
Fixes: 2ea6f76aff40 ("qede: add core driver")
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
---
drivers/net/qede/qede_ethdev.c | 3 +--
drivers/net/qede/qede_rxtx.c | 47 +++++++++++++++++-----------------------
drivers/net/qede/qede_rxtx.h | 11 ++++++++--
3 files changed, 30 insertions(+), 31 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index c8581d8..b7606c8 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -968,8 +968,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index ecff5bc..aebe8cb 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -89,11 +89,11 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
{
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct rte_eth_dev_data *eth_data = dev->data;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
size_t size;
- uint16_t data_size;
int rc;
int i;
@@ -127,34 +127,27 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
-
- /* Sanity check */
- data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM;
-
- if (pkt_len > data_size && !dev->data->scattered_rx) {
- DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
- pkt_len, data_size);
- rte_free(rxq);
- return -EINVAL;
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ qdev->mtu = max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->enable_scatter) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
}
-
if (dev->data->scattered_rx)
- rxq->rx_buf_size = data_size;
+ rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
- rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
-
- qdev->mtu = pkt_len;
+ rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
- DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
- qdev->mtu, rxq->rx_buf_size);
-
- if (pkt_len > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- DP_NOTICE(edev, false, "jumbo frame enabled\n");
- } else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
- }
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index a95b4ab..9a393e9 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -51,14 +51,21 @@
((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
/* Max supported alignment is 256 (8 shift)
* minimal alignment shift 6 is optimal for 57xxx HW performance
*/
#define QEDE_L1_CACHE_SHIFT 6
#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH 3/5] net/qede: fix PF fastpath status block index
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
2016-12-31 8:16 ` [dpdk-stable] [PATCH 2/5] net/qede: fix minimum buffer size and scatter Rx check Rasesh Mody
@ 2016-12-31 8:16 ` Rasesh Mody
2016-12-31 8:16 ` [dpdk-stable] [PATCH 4/5] net/qede: fix per queue stats/xstats Rasesh Mody
` (6 subsequent siblings)
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2016-12-31 8:16 UTC (permalink / raw)
To: dev; +Cc: Harish Patil, stable, Dept-EngDPDKDev
From: Harish Patil <harish.patil@qlogic.com>
Allocate double the number of fastpath status block index
since the PF RX/TX queues are not sharing the status block.
This is an interim solution until other parts of the code
are modified to handle the same.
Fixes: f1e4b6c0acee ("net/qede: fix status block index for VF queues")
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
---
drivers/net/qede/qede_rxtx.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index aebe8cb..f20881c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -431,13 +431,15 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
uint32_t num_sbs;
- int rc, i;
+ uint16_t i;
+ uint16_t sb_idx;
+ int rc;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
else
- num_sbs = (ecore_cxt_get_proto_cid_count
- (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;
+ num_sbs = ecore_cxt_get_proto_cid_count
+ (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
if (num_sbs == 0) {
DP_ERR(edev, "No status blocks available\n");
@@ -455,7 +457,11 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
fp = &qdev->fp_array[i];
- if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
+ if (IS_VF(edev))
+ sb_idx = i % num_sbs;
+ else
+ sb_idx = i;
+ if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
qede_free_fp_arrays(qdev);
return -ENOMEM;
}
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH 4/5] net/qede: fix per queue stats/xstats
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
2016-12-31 8:16 ` [dpdk-stable] [PATCH 2/5] net/qede: fix minimum buffer size and scatter Rx check Rasesh Mody
2016-12-31 8:16 ` [dpdk-stable] [PATCH 3/5] net/qede: fix PF fastpath status block index Rasesh Mody
@ 2016-12-31 8:16 ` Rasesh Mody
2017-01-05 13:26 ` [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Ferruh Yigit
` (5 subsequent siblings)
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2016-12-31 8:16 UTC (permalink / raw)
To: dev; +Cc: Rasesh Mody, stable, Dept-EngDPDKDev
From: Rasesh Mody <Rasesh.Mody@cavium.com>
If the value of number of rxq/txq is different from
RTE_ETHDEV_QUEUE_STAT_CNTRS, limit per queue
stats/xstats to minimum of the two.
Fixes: 7634c5f91569 ("net/qede: add queue statistics")
Signed-off-by: Rasesh Mody <Rasesh.Mody@cavium.com>
---
drivers/net/qede/qede_ethdev.c | 32 +++++++++++++++++++++++++++++---
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index b7606c8..de8286c 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1160,6 +1160,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
@@ -1193,6 +1194,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
eth_stats->oerrors = stats.tx_err_drop_pkts;
/* Queue stats */
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+ "Not all the queue stats will be displayed. Set"
+ " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+ " appropriately and retry.\n");
+
for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
eth_stats->q_ipackets[i] =
@@ -1211,7 +1223,11 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
rx_alloc_errors));
i++;
}
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
txq = qdev->fp_array[(qid)].txqs[0];
eth_stats->q_opackets[j] =
@@ -1221,13 +1237,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
xmit_pkts)));
j++;
}
+ if (j == txq_stat_cntrs)
+ break;
}
}
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
@@ -1237,6 +1257,7 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (xstats_names != NULL) {
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
@@ -1247,7 +1268,9 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
- for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
snprintf(xstats_names[stat_idx].name,
sizeof(xstats_names[stat_idx].name),
@@ -1271,6 +1294,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct ecore_eth_stats stats;
const unsigned int num = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (n < num)
return num;
@@ -1283,7 +1307,9 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
stat_idx++;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
` (2 preceding siblings ...)
2016-12-31 8:16 ` [dpdk-stable] [PATCH 4/5] net/qede: fix per queue stats/xstats Rasesh Mody
@ 2017-01-05 13:26 ` Ferruh Yigit
2017-01-05 21:12 ` Harish Patil
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 " Rasesh Mody
` (4 subsequent siblings)
8 siblings, 1 reply; 13+ messages in thread
From: Ferruh Yigit @ 2017-01-05 13:26 UTC (permalink / raw)
To: Rasesh Mody, dev; +Cc: Harish Patil, stable, Dept-EngDPDKDev
On 12/31/2016 8:16 AM, Rasesh Mody wrote:
> From: Harish Patil <harish.patil@qlogic.com>
>
> - Make qede_process_sg_pkts() inline and add unlikely check
> - Fix mbuf segment chaining logic in qede_process_sg_pkts()
> - Change qede_encode_sg_bd() to return total segments required
> - Fix first TX buffer descriptor's length
> - Replace repetitive code using a macro
>
> Fixes: bec0228816c0 ("net/qede: support scatter gather")
>
> Signed-off-by: Harish Patil <harish.patil@qlogic.com>
> ---
Hi Harish,
This patch doesn't apply cleanly on top next-net, after pci_dev changes
rebased into next-net tree.
Can you please send the new version of the patchset rebased on top of
latest next-net?
Thanks,
ferruh
<...>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue
2017-01-05 13:26 ` [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Ferruh Yigit
@ 2017-01-05 21:12 ` Harish Patil
2017-01-05 22:35 ` Mody, Rasesh
0 siblings, 1 reply; 13+ messages in thread
From: Harish Patil @ 2017-01-05 21:12 UTC (permalink / raw)
To: Ferruh Yigit, Mody, Rasesh, dev; +Cc: stable, Dept-Eng DPDK Dev
>
>On 12/31/2016 8:16 AM, Rasesh Mody wrote:
>> From: Harish Patil <harish.patil@qlogic.com>
>>
>> - Make qede_process_sg_pkts() inline and add unlikely check
>> - Fix mbuf segment chaining logic in qede_process_sg_pkts()
>> - Change qede_encode_sg_bd() to return total segments required
>> - Fix first TX buffer descriptor's length
>> - Replace repetitive code using a macro
>>
>> Fixes: bec0228816c0 ("net/qede: support scatter gather")
>>
>> Signed-off-by: Harish Patil <harish.patil@qlogic.com>
>> ---
>
>Hi Harish,
>
>This patch doesn't apply cleanly on top next-net, after pci_dev changes
>rebased into next-net tree.
>
>Can you please send the new version of the patchset rebased on top of
>latest next-net?
>
>Thanks,
>ferruh
>
><...>
>
Hi Ferruh,
Sure will do that.
Thanks,
Harish
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue
2017-01-05 21:12 ` Harish Patil
@ 2017-01-05 22:35 ` Mody, Rasesh
0 siblings, 0 replies; 13+ messages in thread
From: Mody, Rasesh @ 2017-01-05 22:35 UTC (permalink / raw)
To: Harish Patil, Ferruh Yigit, dev; +Cc: stable, Dept-Eng DPDK Dev
> From: Harish Patil [mailto:harish.patil@qlogic.com]
> Sent: Thursday, January 05, 2017 1:12 PM
>
> >
> >On 12/31/2016 8:16 AM, Rasesh Mody wrote:
> >> From: Harish Patil <harish.patil@qlogic.com>
> >>
> >> - Make qede_process_sg_pkts() inline and add unlikely check
> >> - Fix mbuf segment chaining logic in qede_process_sg_pkts()
> >> - Change qede_encode_sg_bd() to return total segments required
> >> - Fix first TX buffer descriptor's length
> >> - Replace repetitive code using a macro
> >>
> >> Fixes: bec0228816c0 ("net/qede: support scatter gather")
> >>
> >> Signed-off-by: Harish Patil <harish.patil@qlogic.com>
> >> ---
> >
> >Hi Harish,
> >
> >This patch doesn't apply cleanly on top next-net, after pci_dev changes
> >rebased into next-net tree.
We are not seeing failure when applying [PATCH 1/5] net/qede: fix scatter-gather issue on latest dpdk-next-net. However, we do see a failure when trying to apply [PATCH 2/5] net/qede: fix minimum buffer size and scatter Rx check due to pci_dev changes rebased into next-net tree. We'll submit a v2 series rebased on latest next-net.
Thanks!
-Rasesh
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH v2 1/5] net/qede: fix scatter-gather issue
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
` (3 preceding siblings ...)
2017-01-05 13:26 ` [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Ferruh Yigit
@ 2017-01-06 8:16 ` Rasesh Mody
2017-01-06 13:31 ` Ferruh Yigit
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 2/5] net/qede: fix minimum buffer size and scatter Rx check Rasesh Mody
` (3 subsequent siblings)
8 siblings, 1 reply; 13+ messages in thread
From: Rasesh Mody @ 2017-01-06 8:16 UTC (permalink / raw)
To: ferruh.yigit; +Cc: Harish Patil, dev, stable, Dept-EngDPDKDev
From: Harish Patil <harish.patil@qlogic.com>
- Make qede_process_sg_pkts() inline and add unlikely check
- Fix mbuf segment chaining logic in qede_process_sg_pkts()
- Change qede_encode_sg_bd() to return total segments required
- Fix first TX buffer descriptor's length
- Replace repetitive code using a macro
Fixes: bec0228816c0 ("net/qede: support scatter gather")
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
---
drivers/net/qede/qede_rxtx.c | 139 ++++++++++++++++++++----------------------
drivers/net/qede/qede_rxtx.h | 4 --
2 files changed, 65 insertions(+), 78 deletions(-)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 814d384..ecff5bc 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -810,39 +810,28 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
return RTE_PTYPE_UNKNOWN;
}
-
-int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
- int num_segs, uint16_t pkt_len)
+static inline int
+qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ uint8_t num_segs, uint16_t pkt_len)
{
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
struct ecore_dev *edev = &qdev->edev;
- uint16_t sw_rx_index, cur_size;
-
register struct rte_mbuf *seg1 = NULL;
register struct rte_mbuf *seg2 = NULL;
+ uint16_t sw_rx_index;
+ uint16_t cur_size;
seg1 = rx_mb;
while (num_segs) {
- cur_size = pkt_len > rxq->rx_buf_size ?
- rxq->rx_buf_size : pkt_len;
- if (!cur_size) {
- PMD_RX_LOG(DEBUG, rxq,
- "SG packet, len and num BD mismatch\n");
+ cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+ if (unlikely(!cur_size)) {
+ PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
+ " left for mapping jumbo\n", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
-
- if (qede_alloc_rx_buffer(rxq)) {
- uint8_t index;
-
- PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
- index = rxq->port_id;
- rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
- rxq->rx_alloc_errors++;
- return -ENOMEM;
- }
-
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
qede_rx_bd_ring_consume(rxq);
@@ -852,16 +841,9 @@ int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
seg1 = seg1->next;
num_segs--;
rxq->rx_segs++;
- continue;
}
- seg1 = NULL;
-
- if (pkt_len)
- PMD_RX_LOG(DEBUG, rxq,
- "Mapped all BDs of jumbo, but still have %d bytes\n",
- pkt_len);
- return ECORE_SUCCESS;
+ return 0;
}
uint16_t
@@ -878,11 +860,16 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
register struct rte_mbuf *rx_mb = NULL;
register struct rte_mbuf *seg1 = NULL;
enum eth_rx_cqe_type cqe_type;
- uint16_t len, pad, preload_idx, pkt_len, parse_flag;
- uint8_t csum_flag, num_segs;
+ uint16_t pkt_len; /* Sum of all BD segments */
+ uint16_t len; /* Length of first BD */
+ uint8_t num_segs = 1;
+ uint16_t pad;
+ uint16_t preload_idx;
+ uint8_t csum_flag;
+ uint16_t parse_flag;
enum rss_hash_type htype;
uint8_t tunn_parse_flag;
- int ret;
+ uint8_t j;
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -915,6 +902,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
fp_cqe = &cqe->fast_path_regular;
len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
pad = fp_cqe->placement_offset;
assert((len + pad) <= rx_mb->buf_len);
@@ -979,25 +967,29 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->rx_alloc_errors++;
break;
}
-
qede_rx_bd_ring_consume(rxq);
-
if (fp_cqe->bd_num > 1) {
- pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
+ " len on first: %04x Total Len: %04x\n",
+ fp_cqe->bd_num, len, pkt_len);
num_segs = fp_cqe->bd_num - 1;
-
- rxq->rx_segs++;
-
- pkt_len -= len;
seg1 = rx_mb;
- ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
- pkt_len);
- if (ret != ECORE_SUCCESS) {
- qede_recycle_rx_bd_ring(rxq, qdev,
- fp_cqe->bd_num);
+ if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
+ pkt_len - len))
goto next_cqe;
+ for (j = 0; j < num_segs; j++) {
+ if (qede_alloc_rx_buffer(rxq)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Buffer allocation failed\n");
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ rxq->rx_segs++;
}
}
+ rxq->rx_segs++; /* for the first segment */
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
@@ -1007,7 +999,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
rx_mb->nb_segs = fp_cqe->bd_num;
rx_mb->data_len = len;
- rx_mb->pkt_len = fp_cqe->pkt_len;
+ rx_mb->pkt_len = pkt_len;
rx_mb->port = rxq->port_id;
htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
@@ -1114,17 +1106,16 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
}
/* Populate scatter gather buffer descriptor fields */
-static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
- struct rte_mbuf *m_seg,
- uint16_t count,
- struct eth_tx_1st_bd *bd1)
+static inline uint8_t
+qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
+ struct eth_tx_1st_bd *bd1)
{
struct qede_tx_queue *txq = p_txq;
struct eth_tx_2nd_bd *bd2 = NULL;
struct eth_tx_3rd_bd *bd3 = NULL;
struct eth_tx_bd *tx_bd = NULL;
- uint16_t nb_segs = count;
dma_addr_t mapping;
+ uint8_t nb_segs = 1; /* min one segment per packet */
/* Check for scattered buffers */
while (m_seg) {
@@ -1133,28 +1124,27 @@ static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
ecore_chain_produce(&txq->tx_pbl);
memset(bd2, 0, sizeof(*bd2));
mapping = rte_mbuf_data_dma_addr(m_seg);
- bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(bd2, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD2 len %04x\n",
+ m_seg->data_len);
} else if (nb_segs == 2) {
bd3 = (struct eth_tx_3rd_bd *)
ecore_chain_produce(&txq->tx_pbl);
memset(bd3, 0, sizeof(*bd3));
mapping = rte_mbuf_data_dma_addr(m_seg);
- bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(bd3, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD3 len %04x\n",
+ m_seg->data_len);
} else {
tx_bd = (struct eth_tx_bd *)
ecore_chain_produce(&txq->tx_pbl);
memset(tx_bd, 0, sizeof(*tx_bd));
mapping = rte_mbuf_data_dma_addr(m_seg);
- tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD len %04x\n",
+ m_seg->data_len);
}
nb_segs++;
- bd1->data.nbds = nb_segs;
m_seg = m_seg->next;
}
@@ -1170,13 +1160,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
struct eth_tx_1st_bd *bd1;
+ struct rte_mbuf *mbuf;
struct rte_mbuf *m_seg = NULL;
uint16_t nb_tx_pkts;
- uint16_t nb_pkt_sent = 0;
uint16_t bd_prod;
uint16_t idx;
uint16_t tx_count;
- uint16_t nb_segs = 0;
+ uint16_t nb_frags;
+ uint16_t nb_pkt_sent = 0;
fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
@@ -1198,19 +1189,19 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
while (nb_tx_pkts--) {
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = TX_PROD(txq);
- struct rte_mbuf *mbuf = *tx_pkts++;
-
+ mbuf = *tx_pkts++;
txq->sw_tx_ring[idx].mbuf = mbuf;
bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
- /* Zero init struct fields */
- bd1->data.bd_flags.bitfields = 0;
- bd1->data.bitfields = 0;
-
bd1->data.bd_flags.bitfields =
1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ /* FW 8.10.x specific change */
+ bd1->data.bitfields =
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
/* Map MBUF linear data for DMA and set in the first BD */
QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- mbuf->pkt_len);
+ mbuf->data_len);
+ PMD_TX_LOG(INFO, txq, "BD1 len %04x\n", mbuf->data_len);
if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
PMD_TX_LOG(INFO, txq, "Tx tunnel packet\n");
@@ -1267,18 +1258,18 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Handle fragmented MBUF */
m_seg = mbuf->next;
- nb_segs++;
- bd1->data.nbds = nb_segs;
/* Encode scatter gather buffer descriptors if required */
- nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
- txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;
- nb_segs = 0;
+ nb_frags = qede_encode_sg_bd(txq, m_seg, bd1);
+ bd1->data.nbds = nb_frags;
+ txq->nb_tx_avail -= nb_frags;
txq->sw_tx_prod++;
rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
bd_prod =
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
nb_pkt_sent++;
txq->xmit_pkts++;
+ PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x\n",
+ bd1->data.nbds, mbuf->pkt_len);
}
/* Write value of prod idx into bd_prod */
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 2a8645a..a95b4ab 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -41,10 +41,6 @@
(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
(bd)->nbytes = rte_cpu_to_le_16(len); \
- /* FW 8.10.x specific change */ \
- (bd)->data.bitfields = ((len) & \
- ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) \
- << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; \
} while (0)
#define CQE_HAS_VLAN(flags) \
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-stable] [PATCH v2 1/5] net/qede: fix scatter-gather issue
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 " Rasesh Mody
@ 2017-01-06 13:31 ` Ferruh Yigit
0 siblings, 0 replies; 13+ messages in thread
From: Ferruh Yigit @ 2017-01-06 13:31 UTC (permalink / raw)
To: Rasesh Mody; +Cc: Harish Patil, dev, stable, Dept-EngDPDKDev
On 1/6/2017 8:16 AM, Rasesh Mody wrote:
> From: Harish Patil <harish.patil@qlogic.com>
>
> - Make qede_process_sg_pkts() inline and add unlikely check
> - Fix mbuf segment chaining logic in qede_process_sg_pkts()
> - Change qede_encode_sg_bd() to return total segments required
> - Fix first TX buffer descriptor's length
> - Replace repetitive code using a macro
>
> Fixes: bec0228816c0 ("net/qede: support scatter gather")
>
> Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Series applied to dpdk-next-net/master, thanks.
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH v2 2/5] net/qede: fix minimum buffer size and scatter Rx check
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
` (4 preceding siblings ...)
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 " Rasesh Mody
@ 2017-01-06 8:16 ` Rasesh Mody
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 3/5] net/qede: fix PF fastpath status block index Rasesh Mody
` (2 subsequent siblings)
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2017-01-06 8:16 UTC (permalink / raw)
To: ferruh.yigit; +Cc: Harish Patil, dev, stable, Dept-EngDPDKDev
From: Harish Patil <harish.patil@qlogic.com>
- Fix minimum RX buffer size to 1024B
- Force enable scatter/gather mode if given RX buf size is less than MTU
- Adjust RX buffer size to cache-line size with overhead included
Fixes: bec0228816c0 ("net/qede: support scatter gather")
Fixes: 2ea6f76aff40 ("qede: add core driver")
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
---
drivers/net/qede/qede_ethdev.c | 3 +--
drivers/net/qede/qede_rxtx.c | 47 +++++++++++++++++-----------------------
drivers/net/qede/qede_rxtx.h | 11 ++++++++--
3 files changed, 30 insertions(+), 31 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index b7886f4..0b40d1b 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -969,8 +969,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index ecff5bc..aebe8cb 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -89,11 +89,11 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
{
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct rte_eth_dev_data *eth_data = dev->data;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
size_t size;
- uint16_t data_size;
int rc;
int i;
@@ -127,34 +127,27 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
-
- /* Sanity check */
- data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM;
-
- if (pkt_len > data_size && !dev->data->scattered_rx) {
- DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
- pkt_len, data_size);
- rte_free(rxq);
- return -EINVAL;
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ qdev->mtu = max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->enable_scatter) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
}
-
if (dev->data->scattered_rx)
- rxq->rx_buf_size = data_size;
+ rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
- rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
-
- qdev->mtu = pkt_len;
+ rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
- DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
- qdev->mtu, rxq->rx_buf_size);
-
- if (pkt_len > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- DP_NOTICE(edev, false, "jumbo frame enabled\n");
- } else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
- }
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index a95b4ab..9a393e9 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -51,14 +51,21 @@
((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
/* Max supported alignment is 256 (8 shift)
* minimal alignment shift 6 is optimal for 57xxx HW performance
*/
#define QEDE_L1_CACHE_SHIFT 6
#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH v2 3/5] net/qede: fix PF fastpath status block index
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
` (5 preceding siblings ...)
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 2/5] net/qede: fix minimum buffer size and scatter Rx check Rasesh Mody
@ 2017-01-06 8:16 ` Rasesh Mody
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 4/5] net/qede: fix per queue stats/xstats Rasesh Mody
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 5/5] net/qede: convert few DP_NOTICE and DP_INFO to DP_ERR Rasesh Mody
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2017-01-06 8:16 UTC (permalink / raw)
To: ferruh.yigit; +Cc: Harish Patil, dev, stable, Dept-EngDPDKDev
From: Harish Patil <harish.patil@qlogic.com>
Allocate double the number of fastpath status block indices
since the PF RX/TX queues are not sharing the status block.
This is an interim solution till other parts of the code
are modified to handle the same.
Fixes: f1e4b6c0acee ("net/qede: fix status block index for VF queues")
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
---
drivers/net/qede/qede_rxtx.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index aebe8cb..f20881c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -431,13 +431,15 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
uint32_t num_sbs;
- int rc, i;
+ uint16_t i;
+ uint16_t sb_idx;
+ int rc;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
else
- num_sbs = (ecore_cxt_get_proto_cid_count
- (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;
+ num_sbs = ecore_cxt_get_proto_cid_count
+ (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
if (num_sbs == 0) {
DP_ERR(edev, "No status blocks available\n");
@@ -455,7 +457,11 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
fp = &qdev->fp_array[i];
- if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
+ if (IS_VF(edev))
+ sb_idx = i % num_sbs;
+ else
+ sb_idx = i;
+ if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
qede_free_fp_arrays(qdev);
return -ENOMEM;
}
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH v2 4/5] net/qede: fix per queue stats/xstats
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
` (6 preceding siblings ...)
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 3/5] net/qede: fix PF fastpath status block index Rasesh Mody
@ 2017-01-06 8:16 ` Rasesh Mody
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 5/5] net/qede: convert few DP_NOTICE and DP_INFO to DP_ERR Rasesh Mody
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2017-01-06 8:16 UTC (permalink / raw)
To: ferruh.yigit; +Cc: Rasesh Mody, dev, stable, Dept-EngDPDKDev
From: Rasesh Mody <Rasesh.Mody@cavium.com>
If the value of the number of rxq/txq is different from
RTE_ETHDEV_QUEUE_STAT_CNTRS, limit per queue
stats/xstats to the minimum of the two.
Fixes: 7634c5f91569 ("net/qede: add queue statistics")
Signed-off-by: Rasesh Mody <Rasesh.Mody@cavium.com>
---
drivers/net/qede/qede_ethdev.c | 32 +++++++++++++++++++++++++++++---
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 0b40d1b..6d90c46 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1162,6 +1162,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
@@ -1195,6 +1196,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
eth_stats->oerrors = stats.tx_err_drop_pkts;
/* Queue stats */
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+ "Not all the queue stats will be displayed. Set"
+ " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+ " appropriately and retry.\n");
+
for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
eth_stats->q_ipackets[i] =
@@ -1213,7 +1225,11 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
rx_alloc_errors));
i++;
}
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
txq = qdev->fp_array[(qid)].txqs[0];
eth_stats->q_opackets[j] =
@@ -1223,13 +1239,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
xmit_pkts)));
j++;
}
+ if (j == txq_stat_cntrs)
+ break;
}
}
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
@@ -1239,6 +1259,7 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (xstats_names != NULL) {
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
@@ -1249,7 +1270,9 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
- for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
snprintf(xstats_names[stat_idx].name,
sizeof(xstats_names[stat_idx].name),
@@ -1273,6 +1296,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct ecore_eth_stats stats;
const unsigned int num = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (n < num)
return num;
@@ -1285,7 +1309,9 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
stat_idx++;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-stable] [PATCH v2 5/5] net/qede: convert few DP_NOTICE and DP_INFO to DP_ERR
2016-12-31 8:16 [dpdk-stable] [PATCH 1/5] net/qede: fix scatter-gather issue Rasesh Mody
` (7 preceding siblings ...)
2017-01-06 8:16 ` [dpdk-stable] [PATCH v2 4/5] net/qede: fix per queue stats/xstats Rasesh Mody
@ 2017-01-06 8:16 ` Rasesh Mody
8 siblings, 0 replies; 13+ messages in thread
From: Rasesh Mody @ 2017-01-06 8:16 UTC (permalink / raw)
To: ferruh.yigit; +Cc: Rasesh Mody, dev, stable, Dept-EngDPDKDev
From: Rasesh Mody <Rasesh.Mody@cavium.com>
Signed-off-by: Rasesh Mody <Rasesh.Mody@cavium.com>
---
drivers/net/qede/base/ecore_mcp.c | 2 +-
drivers/net/qede/qede_ethdev.c | 11 +++++------
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index bb13828..f634d98 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -931,7 +931,7 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
return;
}
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6d90c46..c67fbb6 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -650,7 +650,7 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
" Please remove existing VLAN filters"
" before disabling VLAN filtering\n");
/* Signal app that VLAN filtering is still
@@ -684,7 +684,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
if (on) {
if (qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_INFO(edev, "Reached max VLAN filter limit"
+ DP_ERR(edev, "Reached max VLAN filter limit"
" enabling accept_any_vlan\n");
qede_config_accept_any_vlan(qdev, true);
return 0;
@@ -849,14 +849,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
- DP_NOTICE(edev, false,
- "100G mode needs min. 2 RX/TX queues\n");
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
(eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
"100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
@@ -867,7 +866,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
eth_dev->data->scattered_rx = 1;
if (rxmode->enable_lro == 1) {
- DP_INFO(edev, "LRO is not supported\n");
+ DP_ERR(edev, "LRO is not supported\n");
return -EINVAL;
}
--
1.7.10.3
^ permalink raw reply [flat|nested] 13+ messages in thread