From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, <konstantin.ananyev@intel.com>,
Radu Nicolau <radu.nicolau@intel.com>,
Akhil Goyal <gakhil@marvell.com>
Cc: <dev@dpdk.org>, <anoobj@marvell.com>,
Nithin Dabilpuram <ndabilpuram@marvell.com>
Subject: [PATCH v3 5/7] examples/ipsec-secgw: get security context from lcore conf
Date: Thu, 28 Apr 2022 20:34:57 +0530
Message-ID: <20220428150459.23950-5-ndabilpuram@marvell.com>
In-Reply-To: <20220428150459.23950-1-ndabilpuram@marvell.com>
Store security context pointer in lcore Rx queue config and
get it from there in fast path for better performance.
Currently rte_eth_dev_get_sec_ctx(), which is meant to be a control
path API, is called on a per-packet basis, and every call to that
API checks the ethdev port status.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
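Note (illustration only, not part of the patch): a minimal sketch of the
caching scheme below, with a hypothetical wrapper struct and function names;
only the sec_ctx field and the rte_security/ethdev calls mirror the diff.
The context is resolved once per Rx queue on the control path and the cached
pointer is reused per packet on the fast path.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

struct rxq_sketch {
	uint16_t port_id;
	uint8_t queue_id;
	struct rte_security_ctx *sec_ctx; /* cached at init time */
};

/* Control path: resolve the security context once per Rx queue. */
static void
rxq_cache_sec_ctx(struct rxq_sketch *rxq)
{
	rxq->sec_ctx = NULL;
	if (rte_security_dynfield_is_registered())
		rxq->sec_ctx = rte_eth_dev_get_sec_ctx(rxq->port_id);
}

/* Fast path: use the cached pointer, no per-packet control path call. */
static void *
rxq_pkt_sa_userdata(struct rxq_sketch *rxq, struct rte_mbuf *pkt)
{
	if (rxq->sec_ctx != NULL &&
	    (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD))
		return rte_security_get_userdata(rxq->sec_ctx,
				*rte_security_dynfield(pkt));
	return NULL;
}

Caching the pointer avoids the per-packet port status check performed by
rte_eth_dev_get_sec_ctx() while keeping the dynfield registration check on
the control path.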
examples/ipsec-secgw/ipsec-secgw.c | 22 ++++++++++++++++++---
examples/ipsec-secgw/ipsec.h | 1 +
examples/ipsec-secgw/ipsec_worker.h | 39 +++++++++++++++++--------------------
3 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 88984a6..14b9c06 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -544,11 +544,11 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint16_t portid)
+ uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
{
struct ipsec_traffic traffic;
- prepare_traffic(pkts, &traffic, nb_pkts);
+ prepare_traffic(ctx, pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
if (is_unprotected_port(portid))
@@ -740,7 +740,8 @@ ipsec_poll_mode_worker(void)
if (nb_rx > 0) {
core_stats_update_rx(nb_rx);
- process_pkts(qconf, pkts, nb_rx, portid);
+ process_pkts(qconf, pkts, nb_rx, portid,
+ rxql->sec_ctx);
}
/* dequeue and process completed crypto-ops */
@@ -3060,6 +3061,21 @@ main(int32_t argc, char **argv)
flow_init();
+ /* Get security context if available and only if dynamic field is
+ * registered for fast path access.
+ */
+ if (!rte_security_dynfield_is_registered())
+ goto skip_sec_ctx;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
+ portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
+ lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
+ rte_eth_dev_get_sec_ctx(portid);
+ }
+ }
+skip_sec_ctx:
+
check_all_ports_link_status(enabled_port_mask);
if (stats_interval > 0)
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 9a4e7ea..ecad262 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -269,6 +269,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
+ struct rte_security_ctx *sec_ctx;
} __rte_cache_aligned;
struct buffer {
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7397291..b1fc364 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -88,7 +88,7 @@ prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
uint32_t l2_len)
{
@@ -101,7 +101,7 @@ adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
}
}
-static inline void
+static __rte_always_inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
uint32_t l2_len)
{
@@ -114,8 +114,9 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
}
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+static __rte_always_inline void
+prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
+ struct ipsec_traffic *t)
{
uint32_t ptype = pkt->packet_type;
const struct rte_ether_hdr *eth;
@@ -203,13 +204,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
* with the security session.
*/
- if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
+ if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
@@ -230,9 +227,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
}
}
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
+static __rte_always_inline void
+prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
+ struct ipsec_traffic *t, uint16_t nb_pkts)
{
int32_t i;
@@ -243,11 +240,11 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
void *));
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
/* Process left packets */
for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
+ prepare_one_packet(ctx, pkts[i], t);
}
static inline void
@@ -305,7 +302,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
}
/* Send burst of packets on an output interface */
-static inline int32_t
+static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
@@ -333,7 +330,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
/*
* Helper function to fragment and queue for TX one packet.
*/
-static inline uint32_t
+static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
uint16_t port, uint8_t proto)
{
@@ -372,7 +369,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
}
/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
+static __rte_always_inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
uint32_t lcore_id;
@@ -404,7 +401,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
return 0;
}
-static inline void
+static __rte_always_inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
uint16_t lim, struct ipsec_spd_stats *stats)
{
@@ -451,7 +448,7 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
ip->num = j;
}
-static inline int32_t
+static __rte_always_inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
struct ipsec_mbuf_metadata *priv;
@@ -531,7 +528,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
int32_t hop[MAX_PKT_BURST * 2];
@@ -585,7 +582,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
}
-static inline void
+static __rte_always_inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
--
2.8.4