From: Wei Hu <weh@linux.microsoft.com>
To: ferruh.yigit@amd.com, andrew.rybchenko@oktetlabs.ru,
thomas@monjalon.net, Long Li <longli@microsoft.com>
Cc: dev@dpdk.org, Wei Hu <weh@linux.microsoft.com>,
Wei Hu <weh@microsoft.com>
Subject: [PATCH v3 1/1] net/mana: add vlan tagging support
Date: Wed, 13 Mar 2024 09:03:40 +0000 [thread overview]
Message-ID: <20240313090341.373037-1-weh@linux.microsoft.com> (raw)
For the tx path, use LONG_PACKET_FORMAT if a vlan tag is present. For
the rx path, extract the vlan id from the oob, put it into the mbuf and
set the vlan flags in the mbuf.
Signed-off-by: Wei Hu <weh@microsoft.com>
---
v3:
- Adjust the position of the pkt_idx increment in the code so it is
executed even when adding the vlan header fails.
v2:
- Use existing vlan tag processing macros.
- Add vlan header back if vlan_strip flag is not set on the receiving path.
drivers/net/mana/mana.c | 3 +++
drivers/net/mana/mana.h | 4 ++++
drivers/net/mana/rx.c | 22 ++++++++++++++++++----
drivers/net/mana/tx.c | 21 ++++++++++++++++++---
4 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 2df2461d2f..68c625258e 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -94,6 +94,9 @@ mana_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
+ priv->vlan_strip = !!(dev_conf->rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+
priv->num_queues = dev->data->nb_rx_queues;
manadv_set_context_attr(priv->ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 3626925871..37f654f0e6 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -21,10 +21,12 @@ struct mana_shared_data {
#define MANA_MAX_MAC_ADDR 1
#define MANA_DEV_RX_OFFLOAD_SUPPORT ( \
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
RTE_ETH_RX_OFFLOAD_CHECKSUM | \
RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define MANA_DEV_TX_OFFLOAD_SUPPORT ( \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
@@ -345,6 +347,8 @@ struct mana_priv {
/* IB device port */
uint8_t dev_port;
+ uint8_t vlan_strip;
+
struct ibv_context *ib_ctx;
struct ibv_pd *ib_pd;
struct ibv_pd *ib_parent_pd;
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 16e647baf5..0c26702b73 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -532,10 +532,6 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
mbuf->hash.rss = oob->packet_info[pkt_idx].packet_hash;
}
- pkts[pkt_received++] = mbuf;
- rxq->stats.packets++;
- rxq->stats.bytes += mbuf->data_len;
-
pkt_idx++;
/* Move on the next completion if all packets are processed */
if (pkt_idx >= RX_COM_OOB_NUM_PACKETINFO_SEGMENTS) {
@@ -543,6 +539,24 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
i++;
}
+ if (oob->rx_vlan_tag_present) {
+ mbuf->ol_flags |=
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = oob->rx_vlan_id;
+
+ if (!priv->vlan_strip && rte_vlan_insert(&mbuf)) {
+ DRV_LOG(ERR, "vlan insert failed");
+ rxq->stats.errors++;
+ rte_pktmbuf_free(mbuf);
+
+ goto drop;
+ }
+ }
+
+ pkts[pkt_received++] = mbuf;
+ rxq->stats.packets++;
+ rxq->stats.bytes += mbuf->data_len;
+
drop:
rxq->desc_ring_tail++;
if (rxq->desc_ring_tail >= rxq->num_desc)
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 58c4a1d976..272a28bcba 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -254,7 +254,18 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Fill in the oob */
- tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
+ if (m_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
+ tx_oob.short_oob.packet_format = LONG_PACKET_FORMAT;
+ tx_oob.long_oob.inject_vlan_prior_tag = 1;
+ tx_oob.long_oob.priority_code_point =
+ RTE_VLAN_TCI_PRI(m_pkt->vlan_tci);
+ tx_oob.long_oob.drop_eligible_indicator =
+ RTE_VLAN_TCI_DEI(m_pkt->vlan_tci);
+ tx_oob.long_oob.vlan_identifier =
+ RTE_VLAN_TCI_ID(m_pkt->vlan_tci);
+ } else {
+ tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
+ }
tx_oob.short_oob.tx_is_outer_ipv4 =
m_pkt->ol_flags & RTE_MBUF_F_TX_IPV4 ? 1 : 0;
tx_oob.short_oob.tx_is_outer_ipv6 =
@@ -409,8 +420,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
work_req.sgl = sgl.gdma_sgl;
work_req.num_sgl_elements = m_pkt->nb_segs;
- work_req.inline_oob_size_in_bytes =
- sizeof(struct transmit_short_oob_v2);
+ if (tx_oob.short_oob.packet_format == SHORT_PACKET_FORMAT)
+ work_req.inline_oob_size_in_bytes =
+ sizeof(struct transmit_short_oob_v2);
+ else
+ work_req.inline_oob_size_in_bytes =
+ sizeof(struct transmit_oob_v2);
work_req.inline_oob_data = &tx_oob;
work_req.flags = 0;
work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
--
2.34.1
next reply other threads:[~2024-03-13 10:06 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-03-13 9:03 Wei Hu [this message]
2024-03-13 17:57 ` Long Li
2024-03-13 18:47 ` Patrick Robb
2024-03-14 9:05 ` Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240313090341.373037-1-weh@linux.microsoft.com \
--to=weh@linux.microsoft.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
--cc=longli@microsoft.com \
--cc=thomas@monjalon.net \
--cc=weh@microsoft.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).