* [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto
@ 2019-07-10 10:57 Bernard Iremonger
2019-07-10 10:57 ` [dpdk-stable] [DPDK 2/2] examples/ipsec-secgw/test: fix inline test scripts Bernard Iremonger
2019-07-10 12:58 ` [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Thomas Monjalon
0 siblings, 2 replies; 7+ messages in thread
From: Bernard Iremonger @ 2019-07-10 10:57 UTC (permalink / raw)
To: qabuild; +Cc: Bernard Iremonger, stable
Inline crypto installs a flow rule in the NIC. This flow
rule must be installed before the first inbound packet is
received.
The create_session() function installs the flow rule; it has been
refactored into create_inline_session() and create_lookaside_session().
The create_inline_session() function uses the socket_ctx data and is
now called at initialisation time in sa_add_rules().
The max_session_size() function has been added to calculate memory
requirements.
The cryptodevs_init() function has been refactored to drop the calls to
rte_mempool_create() and the calculation of memory requirements.
The main() function has been refactored to call max_session_size() and
to call session_pool_init() and session_priv_pool_init() earlier.
In main(), the ports are now started before any flow rule is added.
The sa_init(), sp4_init(), sp6_init() and rt_init() functions are
now called after the ports have been started.
For inline sessions, the rte_ipsec_session_prepare() function is called in
fill_ipsec_session(), which is called from the ipsec_sa_init() function.
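For clarity, the resulting order of operations in main() is outlined below.
This is a condensed sketch drawn from the diff in this patch, not the literal
code: error handling, NUMA/port-mask checks and unrelated steps are omitted.

	sess_sz = max_session_size();

	/* per-socket mbuf and session pools, created before any SA exists */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		/* socket_id selection elided */
		if (socket_ctx[socket_id].mbuf_pool)
			continue;
		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
				sess_sz);
	}

	/* configure ports and crypto devices, then start the ports:
	 * a port must be started before a flow rule can be installed
	 */
	RTE_ETH_FOREACH_DEV(portid)
		port_init(portid, req_rx_offloads, req_tx_offloads);
	cryptodevs_init();
	RTE_ETH_FOREACH_DEV(portid)
		rte_eth_dev_start(portid);

	/* SA/SP/RT initialisation now runs after the ports are up; for
	 * inline SAs, sa_add_rules() calls create_inline_session(), which
	 * creates the security session and installs the flow rule before
	 * the first inbound packet can arrive
	 */
	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
		socket_id = rte_socket_id_by_idx(i);
		sa_init(&socket_ctx[socket_id], socket_id);
		sp4_init(&socket_ctx[socket_id], socket_id);
		sp6_init(&socket_ctx[socket_id], socket_id);
		rt_init(&socket_ctx[socket_id], socket_id);
	}

	check_all_ports_link_status(enabled_port_mask);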
Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: stable@dpdk.org
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 244 +++++++++----------
examples/ipsec-secgw/ipsec.c | 456 ++++++++++++++++++-----------------
examples/ipsec-secgw/ipsec.h | 5 +-
examples/ipsec-secgw/ipsec_process.c | 9 +-
examples/ipsec-secgw/sa.c | 46 +++-
5 files changed, 405 insertions(+), 355 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index b1ecbb9..f4819ce 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1802,7 +1802,7 @@ cryptodevs_init(void)
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
uint16_t idx, max_nb_qps, qp, i;
- int16_t cdev_id, port_id;
+ int16_t cdev_id;
struct rte_hash_parameters params = { 0 };
const uint64_t mseg_flag = multi_seg_required() ?
@@ -1828,45 +1828,6 @@ cryptodevs_init(void)
printf("lcore/cryptodev/qp mappings:\n");
- uint32_t max_sess_sz = 0, sess_sz;
- for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
- void *sec_ctx;
-
- /* Get crypto priv session size */
- sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
-
- /*
- * If crypto device is security capable, need to check the
- * size of security session as well.
- */
-
- /* Get security context of the crypto device */
- sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
- if (sec_ctx == NULL)
- continue;
-
- /* Get size of security session */
- sess_sz = rte_security_session_get_size(sec_ctx);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
- }
- RTE_ETH_FOREACH_DEV(port_id) {
- void *sec_ctx;
-
- if ((enabled_port_mask & (1 << port_id)) == 0)
- continue;
-
- sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
- if (sec_ctx == NULL)
- continue;
-
- sess_sz = rte_security_session_get_size(sec_ctx);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
- }
-
idx = 0;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
@@ -1911,45 +1872,6 @@ cryptodevs_init(void)
"Device does not support at least %u "
"sessions", CDEV_MP_NB_OBJS);
- if (!socket_ctx[dev_conf.socket_id].session_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_%u", dev_conf.socket_id);
- sess_mp = rte_cryptodev_sym_session_pool_create(
- mp_name, CDEV_MP_NB_OBJS,
- 0, CDEV_MP_CACHE_SZ, 0,
- dev_conf.socket_id);
- socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
- }
-
- if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_priv_%u", dev_conf.socket_id);
- sess_mp = rte_mempool_create(mp_name,
- CDEV_MP_NB_OBJS,
- max_sess_sz,
- CDEV_MP_CACHE_SZ,
- 0, NULL, NULL, NULL,
- NULL, dev_conf.socket_id,
- 0);
- socket_ctx[dev_conf.socket_id].session_priv_pool =
- sess_mp;
- }
-
- if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
- !socket_ctx[dev_conf.socket_id].session_pool)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool on socket %d\n",
- dev_conf.socket_id);
- else
- printf("Allocated session pool on socket %d\n",
- dev_conf.socket_id);
-
if (rte_cryptodev_configure(cdev_id, &dev_conf))
rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
@@ -1970,39 +1892,6 @@ cryptodevs_init(void)
cdev_id);
}
- /* create session pools for eth devices that implement security */
- RTE_ETH_FOREACH_DEV(port_id) {
- if ((enabled_port_mask & (1 << port_id)) &&
- rte_eth_dev_get_sec_ctx(port_id)) {
- int socket_id = rte_eth_dev_socket_id(port_id);
-
- if (!socket_ctx[socket_id].session_priv_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_%u", socket_id);
- sess_mp = rte_mempool_create(mp_name,
- (CDEV_MP_NB_OBJS * 2),
- max_sess_sz,
- CDEV_MP_CACHE_SZ,
- 0, NULL, NULL, NULL,
- NULL, socket_id,
- 0);
- if (sess_mp == NULL)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool "
- "on socket %d\n", socket_id);
- else
- printf("Allocated session pool "
- "on socket %d\n", socket_id);
- socket_ctx[socket_id].session_priv_pool =
- sess_mp;
- }
- }
- }
-
-
printf("\n");
return 0;
@@ -2174,6 +2063,99 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
printf("\n");
}
+static size_t
+max_session_size(void)
+{
+ size_t max_sz, sz;
+ void *sec_ctx;
+ int16_t cdev_id, port_id, n;
+
+ max_sz = 0;
+ n = rte_cryptodev_count();
+ for (cdev_id = 0; cdev_id != n; cdev_id++) {
+ sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (sz > max_sz)
+ max_sz = sz;
+ /*
+ * If crypto device is security capable, need to check the
+ * size of security session as well.
+ */
+
+ /* Get security context of the crypto device */
+ sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ /* Get size of security session */
+ sz = rte_security_session_get_size(sec_ctx);
+ if (sz > max_sz)
+ max_sz = sz;
+ }
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ sz = rte_security_session_get_size(sec_ctx);
+ if (sz > max_sz)
+ max_sz = sz;
+ }
+
+ return max_sz;
+}
+
+static void
+session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
+{
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+ sess_mp = rte_cryptodev_sym_session_pool_create(
+ mp_name, CDEV_MP_NB_OBJS,
+ sess_sz, CDEV_MP_CACHE_SZ, 0,
+ socket_id);
+ ctx->session_pool = sess_mp;
+
+ if (ctx->session_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init session pool on socket %d\n", socket_id);
+ else
+ printf("Allocated session pool on socket %d\n", socket_id);
+}
+
+static void
+session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
+ size_t sess_sz)
+{
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_priv_%u", socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+ ctx->session_priv_pool = sess_mp;
+
+ if (ctx->session_priv_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init session priv pool on socket %d\n",
+ socket_id);
+ else
+ printf("Allocated session priv pool on socket %d\n",
+ socket_id);
+}
+
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
@@ -2397,9 +2379,11 @@ main(int32_t argc, char **argv)
{
int32_t ret;
uint32_t lcore_id;
+ uint32_t i;
uint8_t socket_id;
uint16_t portid;
uint64_t req_rx_offloads, req_tx_offloads;
+ size_t sess_sz;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -2427,7 +2411,8 @@ main(int32_t argc, char **argv)
nb_lcores = rte_lcore_count();
- /* Replicate each context per socket */
+ sess_sz = max_session_size();
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
@@ -2437,20 +2422,14 @@ main(int32_t argc, char **argv)
else
socket_id = 0;
+ /* mbuf_pool is initialised by the pool_init() function*/
if (socket_ctx[socket_id].mbuf_pool)
continue;
- /* initilaze SPD */
- sp4_init(&socket_ctx[socket_id], socket_id);
-
- sp6_init(&socket_ctx[socket_id], socket_id);
-
- /* initilaze SAD */
- sa_init(&socket_ctx[socket_id], socket_id);
-
- rt_init(&socket_ctx[socket_id], socket_id);
-
pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+ session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
+ session_priv_pool_init(&socket_ctx[socket_id], socket_id,
+ sess_sz);
}
RTE_ETH_FOREACH_DEV(portid) {
@@ -2468,7 +2447,11 @@ main(int32_t argc, char **argv)
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- /* Start device */
+ /*
+ * Start device
+ * note: device must be started before a flow rule
+ * can be installed.
+ */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
@@ -2493,6 +2476,19 @@ main(int32_t argc, char **argv)
rte_exit(EXIT_FAILURE, "failed at reassemble init");
}
+ /* Replicate each context per socket */
+ for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
+ socket_id = rte_socket_id_by_idx(i);
+ if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+ (socket_ctx[socket_id].sa_in == NULL) &&
+ (socket_ctx[socket_id].sa_out == NULL)) {
+ sa_init(&socket_ctx[socket_id], socket_id);
+ sp4_init(&socket_ctx[socket_id], socket_id);
+ sp6_init(&socket_ctx[socket_id], socket_id);
+ rt_init(&socket_ctx[socket_id], socket_id);
+ }
+ }
+
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index d1cbdc3..0f23cb1 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -40,7 +40,7 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
}
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
@@ -53,19 +53,17 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
- if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
- ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
- (void **)&cdev_id_qp);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC,
+ ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
+ (void **)&cdev_id_qp);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
"No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u, aead_algo %u\n",
key.lcore_id,
key.cipher_algo,
key.auth_algo,
key.aead_algo);
- return -1;
- }
+ return -1;
}
RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -107,231 +105,268 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
"SEC Session init failed: err: %d\n", ret);
return -1;
}
- } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
- struct rte_flow_error err;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- sa->portid);
- const struct rte_security_capability *sec_cap;
- int ret = 0;
-
- sa->sec_session = rte_security_session_create(ctx,
- &sess_conf, ipsec_ctx->session_priv_pool);
- if (sa->sec_session == NULL) {
- RTE_LOG(ERR, IPSEC,
- "SEC Session init failed: err: %d\n", ret);
- return -1;
- }
-
- sec_cap = rte_security_capabilities_get(ctx);
+ } else {
+ RTE_LOG(ERR, IPSEC, "Inline not supported\n");
+ return -1;
+ }
+ } else {
+ sa->crypto_session = rte_cryptodev_sym_session_create(
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_priv_pool);
- /* iterate until ESP tunnel*/
- while (sec_cap->action !=
- RTE_SECURITY_ACTION_TYPE_NONE) {
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+ &cdev_info);
+ }
- if (sec_cap->action == sa->type &&
- sec_cap->protocol ==
- RTE_SECURITY_PROTOCOL_IPSEC &&
- sec_cap->ipsec.mode ==
- sess_conf.ipsec.mode &&
- sec_cap->ipsec.direction == sa->direction)
- break;
- sec_cap++;
- }
+ sa->cdev_id_qp = cdev_id_qp;
- if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
- RTE_LOG(ERR, IPSEC,
- "No suitable security capability found\n");
- return -1;
- }
+ return 0;
+}
- sa->ol_flags = sec_cap->ol_flags;
- sa->security_ctx = ctx;
- sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
-
- if (IS_IP6(sa->flags)) {
- sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
- sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
- sa->pattern[1].spec = &sa->ipv6_spec;
-
- memcpy(sa->ipv6_spec.hdr.dst_addr,
- sa->dst.ip.ip6.ip6_b, 16);
- memcpy(sa->ipv6_spec.hdr.src_addr,
- sa->src.ip.ip6.ip6_b, 16);
- } else if (IS_IP4(sa->flags)) {
- sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
- sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
- sa->pattern[1].spec = &sa->ipv4_spec;
-
- sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
- sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
- }
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa)
+{
+ int32_t ret = 0;
+ struct rte_security_ctx *sec_ctx;
+ struct rte_security_session_conf sess_conf = {
+ .action_type = sa->type,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .spi = sa->spi,
+ .salt = sa->salt,
+ .options = { 0 },
+ .direction = sa->direction,
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = (sa->flags == IP4_TUNNEL ||
+ sa->flags == IP6_TUNNEL) ?
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ } },
+ .crypto_xform = sa->xforms,
+ .userdata = NULL,
+ };
+
+ RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
+ sa->spi, sa->portid);
+
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ struct rte_flow_error err;
+ const struct rte_security_capability *sec_cap;
+ int ret = 0;
+
+ sec_ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ sa->portid);
+ if (sec_ctx == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ " rte_eth_dev_get_sec_ctx failed\n");
+ return -1;
+ }
- sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
- sa->pattern[2].spec = &sa->esp_spec;
- sa->pattern[2].mask = &rte_flow_item_esp_mask;
- sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
+ sa->sec_session = rte_security_session_create(sec_ctx,
+ &sess_conf, skt_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "SEC Session init failed: err: %d\n", ret);
+ return -1;
+ }
- sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+ sec_cap = rte_security_capabilities_get(sec_ctx);
+
+ /* iterate until ESP tunnel*/
+ while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (sec_cap->action == sa->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
- sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
- sa->action[0].conf = sa->sec_session;
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
+ "No suitable security capability found\n");
+ return -1;
+ }
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ sa->ol_flags = sec_cap->ol_flags;
+ sa->security_ctx = sec_ctx;
+ sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+ if (IS_IP6(sa->flags)) {
+ sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
+ sa->pattern[1].spec = &sa->ipv6_spec;
+
+ memcpy(sa->ipv6_spec.hdr.dst_addr,
+ sa->dst.ip.ip6.ip6_b, 16);
+ memcpy(sa->ipv6_spec.hdr.src_addr,
+ sa->src.ip.ip6.ip6_b, 16);
+ } else if (IS_IP4(sa->flags)) {
+ sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ sa->pattern[1].spec = &sa->ipv4_spec;
+
+ sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+ sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+ }
- sa->attr.egress = (sa->direction ==
- RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
- sa->attr.ingress = (sa->direction ==
- RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
- if (sa->attr.ingress) {
- uint8_t rss_key[40];
- struct rte_eth_rss_conf rss_conf = {
- .rss_key = rss_key,
- .rss_key_len = 40,
- };
- struct rte_eth_dev *eth_dev;
- uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
- struct rte_flow_action_rss action_rss;
- unsigned int i;
- unsigned int j;
-
- sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
- /* Try RSS. */
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
- sa->action[1].conf = &action_rss;
- eth_dev = ctx->device;
- rte_eth_dev_rss_hash_conf_get(sa->portid,
- &rss_conf);
- for (i = 0, j = 0;
- i < eth_dev->data->nb_rx_queues; ++i)
- if (eth_dev->data->rx_queues[i])
- queue[j++] = i;
- action_rss = (struct rte_flow_action_rss){
+ sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+ sa->pattern[2].spec = &sa->esp_spec;
+ sa->pattern[2].mask = &rte_flow_item_esp_mask;
+ sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
+
+ sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ sa->action[0].conf = sa->sec_session;
+
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ sa->attr.egress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
+ sa->attr.ingress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+ if (sa->attr.ingress) {
+ uint8_t rss_key[40];
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = rss_key,
+ .rss_key_len = 40,
+ };
+ struct rte_eth_dev *eth_dev;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ struct rte_flow_action_rss action_rss;
+ unsigned int i;
+ unsigned int j;
+
+ sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
+ /* Try RSS. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+ sa->action[1].conf = &action_rss;
+ eth_dev = sec_ctx->device;
+ rte_eth_dev_rss_hash_conf_get(sa->portid, &rss_conf);
+ for (i = 0, j = 0;
+ i < eth_dev->data->nb_rx_queues; ++i)
+ if (eth_dev->data->rx_queues[i])
+ queue[j++] = i;
+
+ action_rss = (struct rte_flow_action_rss){
.types = rss_conf.rss_hf,
.key_len = rss_conf.rss_key_len,
.queue_num = j,
.key = rss_key,
.queue = queue,
- };
- ret = rte_flow_validate(sa->portid, &sa->attr,
- sa->pattern, sa->action,
- &err);
- if (!ret)
- goto flow_create;
- /* Try Queue. */
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
- sa->action[1].conf =
- &(struct rte_flow_action_queue){
- .index = 0,
- };
- ret = rte_flow_validate(sa->portid, &sa->attr,
- sa->pattern, sa->action,
- &err);
- /* Try End. */
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
- sa->action[1].conf = NULL;
- ret = rte_flow_validate(sa->portid, &sa->attr,
- sa->pattern, sa->action,
- &err);
- if (ret)
- goto flow_create_failure;
- } else if (sa->attr.egress &&
- (sa->ol_flags &
+ };
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ if (!ret)
+ goto flow_create;
+ /* Try Queue. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ sa->action[1].conf =
+ &(struct rte_flow_action_queue){
+ .index = 0,
+ };
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ /* Try End. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ sa->action[1].conf = NULL;
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ if (ret)
+ goto flow_create_failure;
+ } else if (sa->attr.egress &&
+ (sa->ol_flags &
RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
- sa->action[1].type =
+ sa->action[1].type =
RTE_FLOW_ACTION_TYPE_PASSTHRU;
- sa->action[2].type =
+ sa->action[2].type =
RTE_FLOW_ACTION_TYPE_END;
- }
+ }
flow_create:
- sa->flow = rte_flow_create(sa->portid,
+ sa->flow = rte_flow_create(sa->portid,
&sa->attr, sa->pattern, sa->action, &err);
- if (sa->flow == NULL) {
+ if (sa->flow == NULL) {
flow_create_failure:
- RTE_LOG(ERR, IPSEC,
- "Failed to create ipsec flow msg: %s\n",
- err.message);
- return -1;
- }
- } else if (sa->type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
- struct rte_security_ctx *ctx =
- (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(sa->portid);
- const struct rte_security_capability *sec_cap;
-
- if (ctx == NULL) {
- RTE_LOG(ERR, IPSEC,
- "Ethernet device doesn't have security features registered\n");
- return -1;
- }
-
- /* Set IPsec parameters in conf */
- set_ipsec_conf(sa, &(sess_conf.ipsec));
+ RTE_LOG(ERR, IPSEC,
+ "Failed to create ipsec flow msg: %s\n",
+ err.message);
+ return -1;
+ }
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ const struct rte_security_capability *sec_cap;
- /* Save SA as userdata for the security session. When
- * the packet is received, this userdata will be
- * retrieved using the metadata from the packet.
- *
- * The PMD is expected to set similar metadata for other
- * operations, like rte_eth_event, which are tied to
- * security session. In such cases, the userdata could
- * be obtained to uniquely identify the security
- * parameters denoted.
- */
+ sec_ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(sa->portid);
- sess_conf.userdata = (void *) sa;
+ if (sec_ctx == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "Ethernet device doesn't have security features registered\n");
+ return -1;
+ }
- sa->sec_session = rte_security_session_create(ctx,
- &sess_conf, ipsec_ctx->session_pool);
- if (sa->sec_session == NULL) {
- RTE_LOG(ERR, IPSEC,
+ /* Set IPsec parameters in conf */
+ set_ipsec_conf(sa, &(sess_conf.ipsec));
+
+ /* Save SA as userdata for the security session. When
+ * the packet is received, this userdata will be
+ * retrieved using the metadata from the packet.
+ *
+ * The PMD is expected to set similar metadata for other
+ * operations, like rte_eth_event, which are tied to
+ * security session. In such cases, the userdata could
+ * be obtained to uniquely identify the security
+ * parameters denoted.
+ */
+
+ sess_conf.userdata = (void *) sa;
+
+ sa->sec_session = rte_security_session_create(sec_ctx,
+ &sess_conf, skt_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
"SEC Session init failed: err: %d\n", ret);
- return -1;
- }
-
- sec_cap = rte_security_capabilities_get(ctx);
+ return -1;
+ }
- if (sec_cap == NULL) {
- RTE_LOG(ERR, IPSEC,
+ sec_cap = rte_security_capabilities_get(sec_ctx);
+ if (sec_cap == NULL) {
+ RTE_LOG(ERR, IPSEC,
"No capabilities registered\n");
- return -1;
- }
+ return -1;
+ }
- /* iterate until ESP tunnel*/
- while (sec_cap->action !=
- RTE_SECURITY_ACTION_TYPE_NONE) {
-
- if (sec_cap->action == sa->type &&
- sec_cap->protocol ==
- RTE_SECURITY_PROTOCOL_IPSEC &&
- sec_cap->ipsec.mode ==
- sess_conf.ipsec.mode &&
- sec_cap->ipsec.direction == sa->direction)
- break;
- sec_cap++;
- }
+ /* iterate until ESP tunnel*/
+ while (sec_cap->action !=
+ RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (sec_cap->action == sa->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ sess_conf.ipsec.mode &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
- if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
- RTE_LOG(ERR, IPSEC,
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
"No suitable security capability found\n");
- return -1;
- }
-
- sa->ol_flags = sec_cap->ol_flags;
- sa->security_ctx = ctx;
+ return -1;
}
- } else {
- sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->session_pool);
- rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
- sa->crypto_session, sa->xforms,
- ipsec_ctx->session_priv_pool);
- rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
- &cdev_info);
+ sa->ol_flags = sec_cap->ol_flags;
+ sa->security_ctx = sec_ctx;
}
- sa->cdev_id_qp = cdev_id_qp;
+ sa->cdev_id_qp = 0;
return 0;
}
@@ -398,7 +433,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -417,7 +452,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->crypto_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -432,12 +467,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
}
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
+ RTE_ASSERT(sa->sec_session != NULL);
ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
rte_security_set_pkt_metadata(
@@ -445,17 +475,11 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa->sec_session, pkts[i], NULL);
continue;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ RTE_ASSERT(sa->sec_session != NULL);
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
-
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
rte_security_attach_session(&priv->cop,
sa->sec_session);
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 6e48466..1efa6e4 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -338,6 +338,9 @@ void
enqueue_cop_burst(struct cdev_qp *cqp);
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa);
#endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 3f9cacb..868f1a2 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -95,22 +95,23 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
/* setup crypto section */
if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
if (sa->crypto_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->crypto.ses = sa->crypto_session;
/* setup session action type */
- } else {
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
if (sa->sec_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->security.ses = sa->sec_session;
ss->security.ctx = sa->security_ctx;
ss->security.ol_flags = sa->ol_flags;
- }
+ } else
+ RTE_ASSERT(0);
rc = rte_ipsec_session_prepare(ss);
if (rc != 0)
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index d700c8e..c3cf3bd 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -868,12 +868,14 @@ sa_add_address_inline_crypto(struct ipsec_sa *sa)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, uint32_t inbound)
+ uint32_t nb_entries, uint32_t inbound,
+ struct socket_ctx *skt_ctx)
{
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
int inline_status;
+ int32_t rc;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
@@ -936,6 +938,17 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->xforms = &sa_ctx->xf[idx].a;
+ if (sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ rc = create_inline_session(skt_ctx, sa);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_inline_session() failed\n");
+ return -EINVAL;
+ }
+ }
print_one_sa_rule(sa, inbound);
} else {
switch (sa->cipher_algo) {
@@ -1011,16 +1024,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 0);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 1);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
@@ -1086,10 +1099,12 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
return 0;
}
-static void
+static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
const struct ipsec_sa *lsa)
{
+ int32_t rc = 0;
+
ss->sa = sa;
ss->type = lsa->type;
@@ -1102,6 +1117,17 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
ss->security.ctx = lsa->security_ctx;
ss->security.ol_flags = lsa->ol_flags;
}
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ if (ss->security.ses != NULL) {
+ rc = rte_ipsec_session_prepare(ss);
+ if (rc != 0)
+ memset(ss, 0, sizeof(*ss));
+ }
+ }
+
+ return rc;
}
/*
@@ -1136,8 +1162,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
if (rc < 0)
return rc;
- fill_ipsec_session(&lsa->ips, sa, lsa);
- return 0;
+ rc = fill_ipsec_session(&lsa->ips, sa, lsa);
+ return rc;
}
/*
@@ -1240,7 +1266,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
+ sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
@@ -1260,7 +1286,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
+ sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
--
2.7.4
* [dpdk-stable] [DPDK 2/2] examples/ipsec-secgw/test: fix inline test scripts
2019-07-10 10:57 [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Bernard Iremonger
@ 2019-07-10 10:57 ` Bernard Iremonger
2019-07-10 12:58 ` [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Thomas Monjalon
1 sibling, 0 replies; 7+ messages in thread
From: Bernard Iremonger @ 2019-07-10 10:57 UTC (permalink / raw)
To: qabuild; +Cc: Bernard Iremonger, stable
Remove the workaround in tun_aesgcm_defs.sh and trs_aesgcm_defs.sh
for the bug where the first inbound packet is dropped for inline
crypto.
Fixes: 929784452094 ("examples/ipsec-secgw: add scripts for functional test")
Cc: stable@dpdk.org
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
examples/ipsec-secgw/test/trs_aesgcm_defs.sh | 10 ----------
examples/ipsec-secgw/test/tun_aesgcm_defs.sh | 10 ----------
2 files changed, 20 deletions(-)
diff --git a/examples/ipsec-secgw/test/trs_aesgcm_defs.sh b/examples/ipsec-secgw/test/trs_aesgcm_defs.sh
index a4d902b..8382d3d 100755
--- a/examples/ipsec-secgw/test/trs_aesgcm_defs.sh
+++ b/examples/ipsec-secgw/test/trs_aesgcm_defs.sh
@@ -33,11 +33,6 @@ aead "rfc4106\(gcm\(aes\)\)" \
ssh ${REMOTE_HOST} ip xfrm policy list
ssh ${REMOTE_HOST} ip xfrm state list
-
- # to overcome problem with ipsec-secgw for inline mode,
- # when first packet(s) will be always dropped.
- # note that ping will fail here
- ssh ${REMOTE_HOST} ping -c 1 ${LOCAL_IPV4}
}
config6_remote_xfrm()
@@ -68,9 +63,4 @@ aead "rfc4106\(gcm\(aes\)\)" \
ssh ${REMOTE_HOST} ip xfrm policy list
ssh ${REMOTE_HOST} ip xfrm state list
-
- # to overcome problem with ipsec-secgw for inline mode,
- # when first packet(s) will be always dropped.
- # note that ping will fail here
- ssh ${REMOTE_HOST} ping -c 1 ${LOCAL_IPV6}
}
diff --git a/examples/ipsec-secgw/test/tun_aesgcm_defs.sh b/examples/ipsec-secgw/test/tun_aesgcm_defs.sh
index 1764ef6..8ae6532 100755
--- a/examples/ipsec-secgw/test/tun_aesgcm_defs.sh
+++ b/examples/ipsec-secgw/test/tun_aesgcm_defs.sh
@@ -35,11 +35,6 @@ aead "rfc4106\(gcm\(aes\)\)" \
ssh ${REMOTE_HOST} ip xfrm policy list
ssh ${REMOTE_HOST} ip xfrm state list
-
- # to overcome problem with ipsec-secgw for inline mode,
- # when first packet(s) will be always dropped.
- # note that ping will fail here
- ssh ${REMOTE_HOST} ping -c 1 ${LOCAL_IPV4}
}
config6_remote_xfrm()
@@ -72,9 +67,4 @@ aead "rfc4106\(gcm\(aes\)\)" \
ssh ${REMOTE_HOST} ip xfrm policy list
ssh ${REMOTE_HOST} ip xfrm state list
-
- # to overcome problem with ipsec-secgw for inline mode,
- # when first packet(s) will be always dropped.
- # note that ping will fail here
- ssh ${REMOTE_HOST} ping6 -c 1 ${LOCAL_IPV6}
}
--
2.7.4
* Re: [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto
2019-07-10 10:57 [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Bernard Iremonger
2019-07-10 10:57 ` [dpdk-stable] [DPDK 2/2] examples/ipsec-secgw/test: fix inline test scripts Bernard Iremonger
@ 2019-07-10 12:58 ` Thomas Monjalon
2019-07-10 13:32 ` Iremonger, Bernard
1 sibling, 1 reply; 7+ messages in thread
From: Thomas Monjalon @ 2019-07-10 12:58 UTC (permalink / raw)
To: Bernard Iremonger; +Cc: stable, qabuild
Is this patch intended to be sent to stable@dpdk.org?
If yes, dev@dpdk.org is missing.
If no, you should disable the automatic Cc with something like
--suppress-cc=bodycc
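For example (an illustrative invocation; the recipients and patch file
names depend on your setup):

  git send-email --suppress-cc=bodycc --to=qabuild@intel.com 0001-*.patch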
* Re: [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto
2019-07-10 12:58 ` [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Thomas Monjalon
@ 2019-07-10 13:32 ` Iremonger, Bernard
0 siblings, 0 replies; 7+ messages in thread
From: Iremonger, Bernard @ 2019-07-10 13:32 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: stable, Iremonger, Bernard
Hi Thomas,
No, this patch is intended for Intel internal QA only.
For future patches I will remove "Cc: stable@dpdk.org".
Regards,
Bernard
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Wednesday, July 10, 2019 1:59 PM
> To: Iremonger, Bernard <bernard.iremonger@intel.com>
> Cc: stable@dpdk.org; qabuild <qabuild@intel.com>
> Subject: Re: [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt
> dropped for inline crypto
>
> Is this patch intended to be sent to stable@dpdk.org?
> If yes, dev@dpdk.org is missing.
> If no, you should disable the automatic Cc with something like
> --suppress-cc=bodycc
>
>
* [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto
@ 2019-07-10 12:49 Bernard Iremonger
0 siblings, 0 replies; 7+ messages in thread
From: Bernard Iremonger @ 2019-07-10 12:49 UTC (permalink / raw)
To: qabuild; +Cc: Bernard Iremonger, stable
Inline crypto installs a flow rule in the NIC. This flow
rule must be installed before the first inbound packet is
received.
The create_session() function installs the flow rule; it has been
refactored into create_inline_session() and create_lookaside_session().
The create_inline_session() function uses the socket_ctx data and is
now called at initialisation time in sa_add_rules().
The max_session_size() function has been added to calculate memory
requirements.
The cryptodevs_init() function has been refactored to drop the calls to
rte_mempool_create() and the calculation of memory requirements.
The main() function has been refactored to call max_session_size() and
to call session_pool_init() and session_priv_pool_init() earlier.
In main(), the ports are now started before any flow rule is added.
The sa_init(), sp4_init(), sp6_init() and rt_init() functions are
now called after the ports have been started.
For inline sessions, the rte_ipsec_session_prepare() function is called in
fill_ipsec_session(), which is called from the ipsec_sa_init() function.
Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: stable@dpdk.org
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 244 +++++++++----------
examples/ipsec-secgw/ipsec.c | 456 ++++++++++++++++++-----------------
examples/ipsec-secgw/ipsec.h | 5 +-
examples/ipsec-secgw/ipsec_process.c | 9 +-
examples/ipsec-secgw/sa.c | 46 +++-
5 files changed, 405 insertions(+), 355 deletions(-)
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -417,7 +452,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->crypto_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -432,12 +467,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
}
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
+ RTE_ASSERT(sa->sec_session != NULL);
ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
rte_security_set_pkt_metadata(
@@ -445,17 +475,11 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa->sec_session, pkts[i], NULL);
continue;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ RTE_ASSERT(sa->sec_session != NULL);
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
-
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
rte_security_attach_session(&priv->cop,
sa->sec_session);
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 6e48466..1efa6e4 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -338,6 +338,9 @@ void
enqueue_cop_burst(struct cdev_qp *cqp);
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa);
#endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 3f9cacb..868f1a2 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -95,22 +95,23 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
/* setup crypto section */
if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
if (sa->crypto_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->crypto.ses = sa->crypto_session;
/* setup session action type */
- } else {
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
if (sa->sec_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->security.ses = sa->sec_session;
ss->security.ctx = sa->security_ctx;
ss->security.ol_flags = sa->ol_flags;
- }
+ } else
+ RTE_ASSERT(0);
rc = rte_ipsec_session_prepare(ss);
if (rc != 0)
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index d700c8e..c3cf3bd 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -868,12 +868,14 @@ sa_add_address_inline_crypto(struct ipsec_sa *sa)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, uint32_t inbound)
+ uint32_t nb_entries, uint32_t inbound,
+ struct socket_ctx *skt_ctx)
{
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
int inline_status;
+ int32_t rc;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
@@ -936,6 +938,17 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->xforms = &sa_ctx->xf[idx].a;
+ if (sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ rc = create_inline_session(skt_ctx, sa);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_inline_session() failed\n");
+ return -EINVAL;
+ }
+ }
print_one_sa_rule(sa, inbound);
} else {
switch (sa->cipher_algo) {
@@ -1011,16 +1024,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 0);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 1);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
@@ -1086,10 +1099,12 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
return 0;
}
-static void
+static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
const struct ipsec_sa *lsa)
{
+ int32_t rc = 0;
+
ss->sa = sa;
ss->type = lsa->type;
@@ -1102,6 +1117,17 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
ss->security.ctx = lsa->security_ctx;
ss->security.ol_flags = lsa->ol_flags;
}
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ if (ss->security.ses != NULL) {
+ rc = rte_ipsec_session_prepare(ss);
+ if (rc != 0)
+ memset(ss, 0, sizeof(*ss));
+ }
+ }
+
+ return rc;
}
/*
@@ -1136,8 +1162,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
if (rc < 0)
return rc;
- fill_ipsec_session(&lsa->ips, sa, lsa);
- return 0;
+ rc = fill_ipsec_session(&lsa->ips, sa, lsa);
+ return rc;
}
/*
@@ -1240,7 +1266,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
+ sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
@@ -1260,7 +1286,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
+ sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
--
2.7.4
* [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto
@ 2019-06-12 14:19 Bernard Iremonger
0 siblings, 0 replies; 7+ messages in thread
From: Bernard Iremonger @ 2019-06-12 14:19 UTC (permalink / raw)
To: qabuild; +Cc: Bernard Iremonger, stable
Inline crypto installs a flow rule in the NIC. This flow
rule must be installed before the first inbound packet is
received.
The create_session() function installs the flow rule. It has been
refactored into create_inline_session()
and create_lookaside_session(). The create_inline_session() function
uses the socket_ctx data and is now called at initialisation in
sa_add_rules().
The max_session_size() function has been added to calculate memory
requirements.
The cryptodevs_init() function has been refactored to drop calls to
rte_mempool_create() and to drop calculation of memory requirements.
The main() function has been refactored to call max_session_size() and
to call session_pool_init() and session_priv_pool_init() earlier.
The ports are now started in main() before any flow rule is added.
The sa_init(), sp4_init(), sp6_init() and rt_init() functions are
now called after the ports have been started.
The rte_ipsec_session_prepare() function is now called in fill_ipsec_session()
for inline sessions; fill_ipsec_session() is in turn called from the
ipsec_sa_init() function.
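For illustration, a minimal sketch of how session creation is split after
this refactor. setup_sa_session() is a hypothetical wrapper introduced only
to show the decision point; the other names are the helpers added by this
patch, and error handling is left to the callers:

    /* hypothetical wrapper -- not part of this patch */
    #include "ipsec.h"

    static int
    setup_sa_session(struct socket_ctx *skt_ctx,
            struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
    {
        switch (sa->type) {
        case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
        case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
            /* inline: session and flow rule must exist before the
             * first inbound packet, so this runs at initialisation
             */
            return create_inline_session(skt_ctx, sa);
        default:
            /* cryptodev and lookaside sessions may still be
             * created lazily on first use
             */
            return create_lookaside_session(ipsec_ctx, sa);
        }
    }
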
Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: stable@dpdk.org
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 244 ++++++++++---------
examples/ipsec-secgw/ipsec.c | 449 +++++++++++++++++++----------------
examples/ipsec-secgw/ipsec.h | 5 +-
examples/ipsec-secgw/ipsec_process.c | 9 +-
examples/ipsec-secgw/sa.c | 46 +++-
5 files changed, 403 insertions(+), 350 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 6c626fa..24876ba 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1628,7 +1628,7 @@ cryptodevs_init(void)
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
uint16_t idx, max_nb_qps, qp, i;
- int16_t cdev_id, port_id;
+ int16_t cdev_id;
struct rte_hash_parameters params = { 0 };
params.entries = CDEV_MAP_ENTRIES;
@@ -1651,45 +1651,6 @@ cryptodevs_init(void)
printf("lcore/cryptodev/qp mappings:\n");
- uint32_t max_sess_sz = 0, sess_sz;
- for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
- void *sec_ctx;
-
- /* Get crypto priv session size */
- sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
-
- /*
- * If crypto device is security capable, need to check the
- * size of security session as well.
- */
-
- /* Get security context of the crypto device */
- sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
- if (sec_ctx == NULL)
- continue;
-
- /* Get size of security session */
- sess_sz = rte_security_session_get_size(sec_ctx);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
- }
- RTE_ETH_FOREACH_DEV(port_id) {
- void *sec_ctx;
-
- if ((enabled_port_mask & (1 << port_id)) == 0)
- continue;
-
- sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
- if (sec_ctx == NULL)
- continue;
-
- sess_sz = rte_security_session_get_size(sec_ctx);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
- }
-
idx = 0;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
@@ -1727,45 +1688,6 @@ cryptodevs_init(void)
"Device does not support at least %u "
"sessions", CDEV_MP_NB_OBJS);
- if (!socket_ctx[dev_conf.socket_id].session_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_%u", dev_conf.socket_id);
- sess_mp = rte_cryptodev_sym_session_pool_create(
- mp_name, CDEV_MP_NB_OBJS,
- 0, CDEV_MP_CACHE_SZ, 0,
- dev_conf.socket_id);
- socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
- }
-
- if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_priv_%u", dev_conf.socket_id);
- sess_mp = rte_mempool_create(mp_name,
- CDEV_MP_NB_OBJS,
- max_sess_sz,
- CDEV_MP_CACHE_SZ,
- 0, NULL, NULL, NULL,
- NULL, dev_conf.socket_id,
- 0);
- socket_ctx[dev_conf.socket_id].session_priv_pool =
- sess_mp;
- }
-
- if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
- !socket_ctx[dev_conf.socket_id].session_pool)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool on socket %d\n",
- dev_conf.socket_id);
- else
- printf("Allocated session pool on socket %d\n",
- dev_conf.socket_id);
-
if (rte_cryptodev_configure(cdev_id, &dev_conf))
rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
@@ -1786,39 +1708,6 @@ cryptodevs_init(void)
cdev_id);
}
- /* create session pools for eth devices that implement security */
- RTE_ETH_FOREACH_DEV(port_id) {
- if ((enabled_port_mask & (1 << port_id)) &&
- rte_eth_dev_get_sec_ctx(port_id)) {
- int socket_id = rte_eth_dev_socket_id(port_id);
-
- if (!socket_ctx[socket_id].session_priv_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_%u", socket_id);
- sess_mp = rte_mempool_create(mp_name,
- (CDEV_MP_NB_OBJS * 2),
- max_sess_sz,
- CDEV_MP_CACHE_SZ,
- 0, NULL, NULL, NULL,
- NULL, socket_id,
- 0);
- if (sess_mp == NULL)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool "
- "on socket %d\n", socket_id);
- else
- printf("Allocated session pool "
- "on socket %d\n", socket_id);
- socket_ctx[socket_id].session_priv_pool =
- sess_mp;
- }
- }
- }
-
-
printf("\n");
return 0;
@@ -1984,6 +1873,99 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
printf("\n");
}
+static size_t
+max_session_size(void)
+{
+ size_t max_sz, sz;
+ void *sec_ctx;
+ int16_t cdev_id, port_id, n;
+
+ max_sz = 0;
+ n = rte_cryptodev_count();
+ for (cdev_id = 0; cdev_id != n; cdev_id++) {
+ sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (sz > max_sz)
+ max_sz = sz;
+ /*
+ * If crypto device is security capable, need to check the
+ * size of security session as well.
+ */
+
+ /* Get security context of the crypto device */
+ sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ /* Get size of security session */
+ sz = rte_security_session_get_size(sec_ctx);
+ if (sz > max_sz)
+ max_sz = sz;
+ }
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ sz = rte_security_session_get_size(sec_ctx);
+ if (sz > max_sz)
+ max_sz = sz;
+ }
+
+ return max_sz;
+}
+
+static void
+session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
+{
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+ sess_mp = rte_cryptodev_sym_session_pool_create(
+ mp_name, CDEV_MP_NB_OBJS,
+ sess_sz, CDEV_MP_CACHE_SZ, 0,
+ socket_id);
+ ctx->session_pool = sess_mp;
+
+ if (ctx->session_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init session pool on socket %d\n", socket_id);
+ else
+ printf("Allocated session pool on socket %d\n", socket_id);
+}
+
+static void
+session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
+ size_t sess_sz)
+{
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_priv_%u", socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+ ctx->session_priv_pool = sess_mp;
+
+ if (ctx->session_priv_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init session priv pool on socket %d\n",
+ socket_id);
+ else
+ printf("Allocated session priv pool on socket %d\n",
+ socket_id);
+}
+
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
@@ -2064,9 +2046,11 @@ main(int32_t argc, char **argv)
{
int32_t ret;
uint32_t lcore_id;
+ uint32_t i;
uint8_t socket_id;
uint16_t portid;
uint64_t req_rx_offloads, req_tx_offloads;
+ size_t sess_sz;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -2094,7 +2078,8 @@ main(int32_t argc, char **argv)
nb_lcores = rte_lcore_count();
- /* Replicate each context per socket */
+ sess_sz = max_session_size();
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
@@ -2104,20 +2089,14 @@ main(int32_t argc, char **argv)
else
socket_id = 0;
+ /* mbuf_pool is initialised by the pool_init() function*/
if (socket_ctx[socket_id].mbuf_pool)
continue;
- /* initilaze SPD */
- sp4_init(&socket_ctx[socket_id], socket_id);
-
- sp6_init(&socket_ctx[socket_id], socket_id);
-
- /* initilaze SAD */
- sa_init(&socket_ctx[socket_id], socket_id);
-
- rt_init(&socket_ctx[socket_id], socket_id);
-
pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+ session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
+ session_priv_pool_init(&socket_ctx[socket_id], socket_id,
+ sess_sz);
}
RTE_ETH_FOREACH_DEV(portid) {
@@ -2135,7 +2114,11 @@ main(int32_t argc, char **argv)
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- /* Start device */
+ /*
+ * Start device
+ * note: device must be started before a flow rule
+ * can be installed.
+ */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
@@ -2153,6 +2136,19 @@ main(int32_t argc, char **argv)
RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
+ /* Replicate each context per socket */
+ for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
+ socket_id = rte_socket_id_by_idx(i);
+ if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+ (socket_ctx[socket_id].sa_in == NULL) &&
+ (socket_ctx[socket_id].sa_out == NULL)) {
+ sa_init(&socket_ctx[socket_id], socket_id);
+ sp4_init(&socket_ctx[socket_id], socket_id);
+ sp6_init(&socket_ctx[socket_id], socket_id);
+ rt_init(&socket_ctx[socket_id], socket_id);
+ }
+ }
+
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 7b85330..c06ddd6 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -40,7 +40,7 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
}
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
@@ -53,19 +53,17 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
- if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
- ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
- (void **)&cdev_id_qp);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC,
+ ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
+ (void **)&cdev_id_qp);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
"No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u, aead_algo %u\n",
key.lcore_id,
key.cipher_algo,
key.auth_algo,
key.aead_algo);
- return -1;
- }
+ return -1;
}
RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -108,227 +106,267 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
"SEC Session init failed: err: %d\n", ret);
return -1;
}
- } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
- struct rte_flow_error err;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- sa->portid);
- const struct rte_security_capability *sec_cap;
- int ret = 0;
-
- sa->sec_session = rte_security_session_create(ctx,
- &sess_conf, ipsec_ctx->session_priv_pool);
- if (sa->sec_session == NULL) {
- RTE_LOG(ERR, IPSEC,
- "SEC Session init failed: err: %d\n", ret);
+ } else if (
+ (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) ||
+ (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ ) {
+ RTE_LOG(ERR, IPSEC, "Inline not supported\n");
return -1;
- }
-
- sec_cap = rte_security_capabilities_get(ctx);
+ }
+ } else {
+ sa->crypto_session = rte_cryptodev_sym_session_create(
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_priv_pool);
- /* iterate until ESP tunnel*/
- while (sec_cap->action !=
- RTE_SECURITY_ACTION_TYPE_NONE) {
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+ &cdev_info);
+ }
- if (sec_cap->action == sa->type &&
- sec_cap->protocol ==
- RTE_SECURITY_PROTOCOL_IPSEC &&
- sec_cap->ipsec.mode ==
- RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
- sec_cap->ipsec.direction == sa->direction)
- break;
- sec_cap++;
- }
+ sa->cdev_id_qp = cdev_id_qp;
- if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
- RTE_LOG(ERR, IPSEC,
- "No suitable security capability found\n");
- return -1;
- }
+ return 0;
+}
- sa->ol_flags = sec_cap->ol_flags;
- sa->security_ctx = ctx;
- sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
-
- sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
- sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
- if (sa->flags & IP6_TUNNEL) {
- sa->pattern[1].spec = &sa->ipv6_spec;
- memcpy(sa->ipv6_spec.hdr.dst_addr,
- sa->dst.ip.ip6.ip6_b, 16);
- memcpy(sa->ipv6_spec.hdr.src_addr,
- sa->src.ip.ip6.ip6_b, 16);
- } else {
- sa->pattern[1].spec = &sa->ipv4_spec;
- sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
- sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
- }
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa)
+{
+ int32_t ret = 0;
+ struct rte_security_ctx *sec_ctx;
+ struct rte_security_session_conf sess_conf = {
+ .action_type = sa->type,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .spi = sa->spi,
+ .salt = sa->salt,
+ .options = { 0 },
+ .direction = sa->direction,
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = (sa->flags == IP4_TUNNEL ||
+ sa->flags == IP6_TUNNEL) ?
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ } },
+ .crypto_xform = sa->xforms,
+ .userdata = NULL,
+ };
+
+ RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
+ sa->spi, sa->portid);
+
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ struct rte_flow_error err;
+ const struct rte_security_capability *sec_cap;
+ int ret = 0;
+
+ sec_ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ sa->portid);
+ if (sec_ctx == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ " rte_eth_dev_get_sec_ctx failed\n");
+ return -1;
+ }
- sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
- sa->pattern[2].spec = &sa->esp_spec;
- sa->pattern[2].mask = &rte_flow_item_esp_mask;
- sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
+ sa->sec_session = rte_security_session_create(sec_ctx,
+ &sess_conf, skt_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "SEC Session init failed: err: %d\n", ret);
+ return -1;
+ }
- sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+ sec_cap = rte_security_capabilities_get(sec_ctx);
+
+ /* iterate until ESP tunnel*/
+ while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (sec_cap->action == sa->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
- sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
- sa->action[0].conf = sa->sec_session;
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
+ "No suitable security capability found\n");
+ return -1;
+ }
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ sa->ol_flags = sec_cap->ol_flags;
+ sa->security_ctx = sec_ctx;
+ sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+ if (sa->flags & IP6_TUNNEL) {
+ sa->pattern[1].spec = &sa->ipv6_spec;
+ memcpy(sa->ipv6_spec.hdr.dst_addr,
+ sa->dst.ip.ip6.ip6_b, 16);
+ memcpy(sa->ipv6_spec.hdr.src_addr,
+ sa->src.ip.ip6.ip6_b, 16);
+ } else {
+ sa->pattern[1].spec = &sa->ipv4_spec;
+ sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+ sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+ }
- sa->attr.egress = (sa->direction ==
- RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
- sa->attr.ingress = (sa->direction ==
- RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
- if (sa->attr.ingress) {
- uint8_t rss_key[40];
- struct rte_eth_rss_conf rss_conf = {
- .rss_key = rss_key,
- .rss_key_len = 40,
- };
- struct rte_eth_dev *eth_dev;
- uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
- struct rte_flow_action_rss action_rss;
- unsigned int i;
- unsigned int j;
-
- sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
- /* Try RSS. */
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
- sa->action[1].conf = &action_rss;
- eth_dev = ctx->device;
- rte_eth_dev_rss_hash_conf_get(sa->portid,
- &rss_conf);
- for (i = 0, j = 0;
- i < eth_dev->data->nb_rx_queues; ++i)
- if (eth_dev->data->rx_queues[i])
- queue[j++] = i;
- action_rss = (struct rte_flow_action_rss){
+ sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+ sa->pattern[2].spec = &sa->esp_spec;
+ sa->pattern[2].mask = &rte_flow_item_esp_mask;
+ sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
+
+ sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ sa->action[0].conf = sa->sec_session;
+
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ sa->attr.egress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
+ sa->attr.ingress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+ if (sa->attr.ingress) {
+ uint8_t rss_key[40];
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = rss_key,
+ .rss_key_len = 40,
+ };
+ struct rte_eth_dev *eth_dev;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ struct rte_flow_action_rss action_rss;
+ unsigned int i;
+ unsigned int j;
+
+ sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
+ /* Try RSS. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+ sa->action[1].conf = &action_rss;
+ eth_dev = sec_ctx->device;
+ rte_eth_dev_rss_hash_conf_get(sa->portid, &rss_conf);
+ for (i = 0, j = 0;
+ i < eth_dev->data->nb_rx_queues; ++i)
+ if (eth_dev->data->rx_queues[i])
+ queue[j++] = i;
+
+ action_rss = (struct rte_flow_action_rss){
.types = rss_conf.rss_hf,
.key_len = rss_conf.rss_key_len,
.queue_num = j,
.key = rss_key,
.queue = queue,
- };
- ret = rte_flow_validate(sa->portid, &sa->attr,
- sa->pattern, sa->action,
- &err);
- if (!ret)
- goto flow_create;
- /* Try Queue. */
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
- sa->action[1].conf =
- &(struct rte_flow_action_queue){
- .index = 0,
- };
- ret = rte_flow_validate(sa->portid, &sa->attr,
- sa->pattern, sa->action,
- &err);
- /* Try End. */
- sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
- sa->action[1].conf = NULL;
- ret = rte_flow_validate(sa->portid, &sa->attr,
- sa->pattern, sa->action,
- &err);
- if (ret)
- goto flow_create_failure;
- } else if (sa->attr.egress &&
- (sa->ol_flags &
+ };
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ if (!ret)
+ goto flow_create;
+ /* Try Queue. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ sa->action[1].conf =
+ &(struct rte_flow_action_queue){
+ .index = 0,
+ };
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ /* Try End. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ sa->action[1].conf = NULL;
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ if (ret)
+ goto flow_create_failure;
+ } else if (sa->attr.egress &&
+ (sa->ol_flags &
RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
- sa->action[1].type =
+ sa->action[1].type =
RTE_FLOW_ACTION_TYPE_PASSTHRU;
- sa->action[2].type =
+ sa->action[2].type =
RTE_FLOW_ACTION_TYPE_END;
- }
+ }
flow_create:
- sa->flow = rte_flow_create(sa->portid,
+ sa->flow = rte_flow_create(sa->portid,
&sa->attr, sa->pattern, sa->action, &err);
- if (sa->flow == NULL) {
+ if (sa->flow == NULL) {
flow_create_failure:
- RTE_LOG(ERR, IPSEC,
- "Failed to create ipsec flow msg: %s\n",
- err.message);
- return -1;
- }
- } else if (sa->type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
- struct rte_security_ctx *ctx =
- (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(sa->portid);
- const struct rte_security_capability *sec_cap;
-
- if (ctx == NULL) {
- RTE_LOG(ERR, IPSEC,
- "Ethernet device doesn't have security features registered\n");
- return -1;
- }
-
- /* Set IPsec parameters in conf */
- set_ipsec_conf(sa, &(sess_conf.ipsec));
+ RTE_LOG(ERR, IPSEC,
+ "Failed to create ipsec flow msg: %s\n",
+ err.message);
+ return -1;
+ }
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ const struct rte_security_capability *sec_cap;
- /* Save SA as userdata for the security session. When
- * the packet is received, this userdata will be
- * retrieved using the metadata from the packet.
- *
- * The PMD is expected to set similar metadata for other
- * operations, like rte_eth_event, which are tied to
- * security session. In such cases, the userdata could
- * be obtained to uniquely identify the security
- * parameters denoted.
- */
+ sec_ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(sa->portid);
- sess_conf.userdata = (void *) sa;
+ if (sec_ctx == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "Ethernet device doesn't have security features registered\n");
+ return -1;
+ }
- sa->sec_session = rte_security_session_create(ctx,
- &sess_conf, ipsec_ctx->session_pool);
- if (sa->sec_session == NULL) {
- RTE_LOG(ERR, IPSEC,
+ /* Set IPsec parameters in conf */
+ set_ipsec_conf(sa, &(sess_conf.ipsec));
+
+ /* Save SA as userdata for the security session. When
+ * the packet is received, this userdata will be
+ * retrieved using the metadata from the packet.
+ *
+ * The PMD is expected to set similar metadata for other
+ * operations, like rte_eth_event, which are tied to
+ * security session. In such cases, the userdata could
+ * be obtained to uniquely identify the security
+ * parameters denoted.
+ */
+
+ sess_conf.userdata = (void *) sa;
+
+ sa->sec_session = rte_security_session_create(sec_ctx,
+ &sess_conf, skt_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
"SEC Session init failed: err: %d\n", ret);
- return -1;
- }
-
- sec_cap = rte_security_capabilities_get(ctx);
+ return -1;
+ }
- if (sec_cap == NULL) {
- RTE_LOG(ERR, IPSEC,
+ sec_cap = rte_security_capabilities_get(sec_ctx);
+ if (sec_cap == NULL) {
+ RTE_LOG(ERR, IPSEC,
"No capabilities registered\n");
- return -1;
- }
+ return -1;
+ }
- /* iterate until ESP tunnel*/
- while (sec_cap->action !=
- RTE_SECURITY_ACTION_TYPE_NONE) {
-
- if (sec_cap->action == sa->type &&
- sec_cap->protocol ==
- RTE_SECURITY_PROTOCOL_IPSEC &&
- sec_cap->ipsec.mode ==
- RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
- sec_cap->ipsec.direction == sa->direction)
- break;
- sec_cap++;
- }
+ /* iterate until ESP tunnel*/
+ while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (sec_cap->action == sa->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
- if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
- RTE_LOG(ERR, IPSEC,
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
"No suitable security capability found\n");
- return -1;
- }
-
- sa->ol_flags = sec_cap->ol_flags;
- sa->security_ctx = ctx;
+ return -1;
}
- } else {
- sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->session_pool);
- rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
- sa->crypto_session, sa->xforms,
- ipsec_ctx->session_priv_pool);
- rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
- &cdev_info);
+ sa->ol_flags = sec_cap->ol_flags;
+ sa->security_ctx = sec_ctx;
}
- sa->cdev_id_qp = cdev_id_qp;
+
+ sa->cdev_id_qp = 0;
return 0;
}
@@ -395,7 +433,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -414,7 +452,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->crypto_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -429,12 +467,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
}
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
+ RTE_ASSERT(sa->sec_session != NULL);
ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
rte_security_set_pkt_metadata(
@@ -442,17 +475,11 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa->sec_session, pkts[i], NULL);
continue;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ RTE_ASSERT(sa->sec_session != NULL);
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
-
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
rte_security_attach_session(&priv->cop,
sa->sec_session);
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index e9272d7..41bac0b 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -312,6 +312,9 @@ void
enqueue_cop_burst(struct cdev_qp *cqp);
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa);
#endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 3f9cacb..868f1a2 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -95,22 +95,23 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
/* setup crypto section */
if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
if (sa->crypto_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->crypto.ses = sa->crypto_session;
/* setup session action type */
- } else {
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
if (sa->sec_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->security.ses = sa->sec_session;
ss->security.ctx = sa->security_ctx;
ss->security.ol_flags = sa->ol_flags;
- }
+ } else
+ RTE_ASSERT(0);
rc = rte_ipsec_session_prepare(ss);
if (rc != 0)
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 8d47d1d..e8e55bf 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -777,11 +777,13 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, uint32_t inbound)
+ uint32_t nb_entries, uint32_t inbound,
+ struct socket_ctx *skt_ctx)
{
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
+ int32_t rc;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
@@ -834,6 +836,17 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->xforms = &sa_ctx->xf[idx].a;
+ if (sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ rc = create_inline_session(skt_ctx, sa);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_inline_session() failed\n");
+ return -EINVAL;
+ }
+ }
print_one_sa_rule(sa, inbound);
} else {
switch (sa->cipher_algo) {
@@ -909,16 +922,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 0);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 1);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
@@ -1012,10 +1025,12 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
return 0;
}
-static void
+static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
const struct ipsec_sa *lsa)
{
+ int32_t rc = 0;
+
ss->sa = sa;
ss->type = lsa->type;
@@ -1028,6 +1043,17 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
ss->security.ctx = lsa->security_ctx;
ss->security.ol_flags = lsa->ol_flags;
}
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ if (ss->security.ses != NULL) {
+ rc = rte_ipsec_session_prepare(ss);
+ if (rc != 0)
+ memset(ss, 0, sizeof(*ss));
+ }
+ }
+
+ return rc;
}
/*
@@ -1062,8 +1088,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
if (rc < 0)
return rc;
- fill_ipsec_session(&lsa->ips, sa, lsa);
- return 0;
+ rc = fill_ipsec_session(&lsa->ips, sa, lsa);
+ return rc;
}
/*
@@ -1166,7 +1192,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
+ sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
@@ -1186,7 +1212,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
+ sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
--
2.7.4
* [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto
@ 2019-06-06 11:00 Bernard Iremonger
0 siblings, 0 replies; 7+ messages in thread
From: Bernard Iremonger @ 2019-06-06 11:00 UTC (permalink / raw)
To: qabuild; +Cc: Bernard Iremonger, stable
Inline crypto installs a flow rule in the NIC. This flow
rule must be installed before the first inbound packet is
received.
The create_session() function installs the flow rule. It has been
refactored into create_inline_session()
and create_lookaside_session(). The create_inline_session() function
uses the socket_ctx data and is now called at initialisation in
sa_add_rules().
The max_session_size() function has been added to calculate memory
requirements.
The cryptodevs_init() function has been refactored to drop calls to
rte_mempool_create() and to drop calculation of memory requirements.
The main() function has been refactored to call max_session_size() and
to call session_pool_init() and session_priv_pool_init() earlier.
The ports are now started in main() before any flow rule is added.
The sa_init(), sp4_init(), sp6_init() and rt_init() functions are
now called after the ports have been started.
The rte_ipsec_session_prepare() function is now called in fill_ipsec_session()
for inline sessions; fill_ipsec_session() is in turn called from the
ipsec_sa_init() function.
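For illustration, a hypothetical condensed view of the resulting start-up
order. All names come from this patch, but the loops over lcores and ports,
cryptodev/port configuration and error handling are omitted, so this is a
sketch rather than working code:

    static void
    startup_order_sketch(uint8_t socket_id, uint16_t portid)
    {
        size_t sess_sz = max_session_size();

        pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
        session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
        session_priv_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);

        /* the port must be started before a flow rule can be installed */
        rte_eth_dev_start(portid);

        /* sa_init() now installs the inline flow rules via
         * sa_add_rules()/create_inline_session()
         */
        sa_init(&socket_ctx[socket_id], socket_id);
        sp4_init(&socket_ctx[socket_id], socket_id);
        sp6_init(&socket_ctx[socket_id], socket_id);
        rt_init(&socket_ctx[socket_id], socket_id);
    }
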
Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: stable@dpdk.org
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 244 +++++++++++++++++------------------
examples/ipsec-secgw/ipsec.c | 122 ++++++++++++------
examples/ipsec-secgw/ipsec.h | 5 +-
examples/ipsec-secgw/ipsec_process.c | 9 +-
examples/ipsec-secgw/sa.c | 46 +++++--
5 files changed, 245 insertions(+), 181 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 6c626fa..24876ba 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1628,7 +1628,7 @@ cryptodevs_init(void)
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
uint16_t idx, max_nb_qps, qp, i;
- int16_t cdev_id, port_id;
+ int16_t cdev_id;
struct rte_hash_parameters params = { 0 };
params.entries = CDEV_MAP_ENTRIES;
@@ -1651,45 +1651,6 @@ cryptodevs_init(void)
printf("lcore/cryptodev/qp mappings:\n");
- uint32_t max_sess_sz = 0, sess_sz;
- for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
- void *sec_ctx;
-
- /* Get crypto priv session size */
- sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
-
- /*
- * If crypto device is security capable, need to check the
- * size of security session as well.
- */
-
- /* Get security context of the crypto device */
- sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
- if (sec_ctx == NULL)
- continue;
-
- /* Get size of security session */
- sess_sz = rte_security_session_get_size(sec_ctx);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
- }
- RTE_ETH_FOREACH_DEV(port_id) {
- void *sec_ctx;
-
- if ((enabled_port_mask & (1 << port_id)) == 0)
- continue;
-
- sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
- if (sec_ctx == NULL)
- continue;
-
- sess_sz = rte_security_session_get_size(sec_ctx);
- if (sess_sz > max_sess_sz)
- max_sess_sz = sess_sz;
- }
-
idx = 0;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
@@ -1727,45 +1688,6 @@ cryptodevs_init(void)
"Device does not support at least %u "
"sessions", CDEV_MP_NB_OBJS);
- if (!socket_ctx[dev_conf.socket_id].session_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_%u", dev_conf.socket_id);
- sess_mp = rte_cryptodev_sym_session_pool_create(
- mp_name, CDEV_MP_NB_OBJS,
- 0, CDEV_MP_CACHE_SZ, 0,
- dev_conf.socket_id);
- socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
- }
-
- if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_priv_%u", dev_conf.socket_id);
- sess_mp = rte_mempool_create(mp_name,
- CDEV_MP_NB_OBJS,
- max_sess_sz,
- CDEV_MP_CACHE_SZ,
- 0, NULL, NULL, NULL,
- NULL, dev_conf.socket_id,
- 0);
- socket_ctx[dev_conf.socket_id].session_priv_pool =
- sess_mp;
- }
-
- if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
- !socket_ctx[dev_conf.socket_id].session_pool)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool on socket %d\n",
- dev_conf.socket_id);
- else
- printf("Allocated session pool on socket %d\n",
- dev_conf.socket_id);
-
if (rte_cryptodev_configure(cdev_id, &dev_conf))
rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
@@ -1786,39 +1708,6 @@ cryptodevs_init(void)
cdev_id);
}
- /* create session pools for eth devices that implement security */
- RTE_ETH_FOREACH_DEV(port_id) {
- if ((enabled_port_mask & (1 << port_id)) &&
- rte_eth_dev_get_sec_ctx(port_id)) {
- int socket_id = rte_eth_dev_socket_id(port_id);
-
- if (!socket_ctx[socket_id].session_priv_pool) {
- char mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
- snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
- "sess_mp_%u", socket_id);
- sess_mp = rte_mempool_create(mp_name,
- (CDEV_MP_NB_OBJS * 2),
- max_sess_sz,
- CDEV_MP_CACHE_SZ,
- 0, NULL, NULL, NULL,
- NULL, socket_id,
- 0);
- if (sess_mp == NULL)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool "
- "on socket %d\n", socket_id);
- else
- printf("Allocated session pool "
- "on socket %d\n", socket_id);
- socket_ctx[socket_id].session_priv_pool =
- sess_mp;
- }
- }
- }
-
-
printf("\n");
return 0;
@@ -1984,6 +1873,99 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
printf("\n");
}
+static size_t
+max_session_size(void)
+{
+ size_t max_sz, sz;
+ void *sec_ctx;
+ int16_t cdev_id, port_id, n;
+
+ max_sz = 0;
+ n = rte_cryptodev_count();
+ for (cdev_id = 0; cdev_id != n; cdev_id++) {
+ sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (sz > max_sz)
+ max_sz = sz;
+ /*
+ * If crypto device is security capable, need to check the
+ * size of security session as well.
+ */
+
+ /* Get security context of the crypto device */
+ sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ /* Get size of security session */
+ sz = rte_security_session_get_size(sec_ctx);
+ if (sz > max_sz)
+ max_sz = sz;
+ }
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ sz = rte_security_session_get_size(sec_ctx);
+ if (sz > max_sz)
+ max_sz = sz;
+ }
+
+ return max_sz;
+}
+
+static void
+session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
+{
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+ sess_mp = rte_cryptodev_sym_session_pool_create(
+ mp_name, CDEV_MP_NB_OBJS,
+ sess_sz, CDEV_MP_CACHE_SZ, 0,
+ socket_id);
+ ctx->session_pool = sess_mp;
+
+ if (ctx->session_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init session pool on socket %d\n", socket_id);
+ else
+ printf("Allocated session pool on socket %d\n", socket_id);
+}
+
+static void
+session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
+ size_t sess_sz)
+{
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_priv_%u", socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+ ctx->session_priv_pool = sess_mp;
+
+ if (ctx->session_priv_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init session priv pool on socket %d\n",
+ socket_id);
+ else
+ printf("Allocated session priv pool on socket %d\n",
+ socket_id);
+}
+
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
@@ -2064,9 +2046,11 @@ main(int32_t argc, char **argv)
{
int32_t ret;
uint32_t lcore_id;
+ uint32_t i;
uint8_t socket_id;
uint16_t portid;
uint64_t req_rx_offloads, req_tx_offloads;
+ size_t sess_sz;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -2094,7 +2078,8 @@ main(int32_t argc, char **argv)
nb_lcores = rte_lcore_count();
- /* Replicate each context per socket */
+ sess_sz = max_session_size();
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
@@ -2104,20 +2089,14 @@ main(int32_t argc, char **argv)
else
socket_id = 0;
+ /* mbuf_pool is initialised by the pool_init() function*/
if (socket_ctx[socket_id].mbuf_pool)
continue;
- /* initilaze SPD */
- sp4_init(&socket_ctx[socket_id], socket_id);
-
- sp6_init(&socket_ctx[socket_id], socket_id);
-
- /* initilaze SAD */
- sa_init(&socket_ctx[socket_id], socket_id);
-
- rt_init(&socket_ctx[socket_id], socket_id);
-
pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+ session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
+ session_priv_pool_init(&socket_ctx[socket_id], socket_id,
+ sess_sz);
}
RTE_ETH_FOREACH_DEV(portid) {
@@ -2135,7 +2114,11 @@ main(int32_t argc, char **argv)
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- /* Start device */
+ /*
+ * Start device
+ * note: device must be started before a flow rule
+ * can be installed.
+ */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
@@ -2153,6 +2136,19 @@ main(int32_t argc, char **argv)
RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
+ /* Replicate each context per socket */
+ for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
+ socket_id = rte_socket_id_by_idx(i);
+ if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+ (socket_ctx[socket_id].sa_in == NULL) &&
+ (socket_ctx[socket_id].sa_out == NULL)) {
+ sa_init(&socket_ctx[socket_id], socket_id);
+ sp4_init(&socket_ctx[socket_id], socket_id);
+ sp6_init(&socket_ctx[socket_id], socket_id);
+ rt_init(&socket_ctx[socket_id], socket_id);
+ }
+ }
+
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 7b85330..40a6123 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -40,7 +40,7 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
}
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
@@ -108,23 +108,81 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
"SEC Session init failed: err: %d\n", ret);
return -1;
}
- } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ } else if (
+ (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) ||
+ (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ ) {
+ RTE_LOG(ERR, IPSEC, "Inline not supported\n");
+ return -1;
+ }
+ } else {
+ sa->crypto_session = rte_cryptodev_sym_session_create(
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_priv_pool);
+
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+ &cdev_info);
+ }
+
+ sa->cdev_id_qp = cdev_id_qp;
+
+ return 0;
+}
+
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa)
+{
+ unsigned long cdev_id_qp = 0;
+ int32_t ret = 0;
+ struct rte_security_ctx *sec_ctx;
+
+ RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
+ sa->spi, sa->portid);
+
+ if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+ struct rte_security_session_conf sess_conf = {
+ .action_type = sa->type,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .spi = sa->spi,
+ .salt = sa->salt,
+ .options = { 0 },
+ .direction = sa->direction,
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = (sa->flags == IP4_TUNNEL ||
+ sa->flags == IP6_TUNNEL) ?
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ } },
+ .crypto_xform = sa->xforms,
+ .userdata = NULL,
+ };
+
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
struct rte_flow_error err;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- sa->portid);
const struct rte_security_capability *sec_cap;
int ret = 0;
- sa->sec_session = rte_security_session_create(ctx,
- &sess_conf, ipsec_ctx->session_priv_pool);
+ sec_ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ sa->portid);
+ if (sec_ctx == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ " rte_eth_dev_get_sec_ctx failed\n");
+ return -1;
+ }
+
+ sa->sec_session = rte_security_session_create(sec_ctx,
+ &sess_conf, skt_ctx->session_pool);
if (sa->sec_session == NULL) {
RTE_LOG(ERR, IPSEC,
"SEC Session init failed: err: %d\n", ret);
return -1;
}
- sec_cap = rte_security_capabilities_get(ctx);
+ sec_cap = rte_security_capabilities_get(sec_ctx);
/* iterate until ESP tunnel*/
while (sec_cap->action !=
@@ -147,7 +205,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
}
sa->ol_flags = sec_cap->ol_flags;
- sa->security_ctx = ctx;
+ sa->security_ctx = sec_ctx;
sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
@@ -196,7 +254,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
/* Try RSS. */
sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
sa->action[1].conf = &action_rss;
- eth_dev = ctx->device;
+ eth_dev = sec_ctx->device;
rte_eth_dev_rss_hash_conf_get(sa->portid,
&rss_conf);
for (i = 0, j = 0;
@@ -252,12 +310,12 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
}
} else if (sa->type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
- struct rte_security_ctx *ctx =
- (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(sa->portid);
const struct rte_security_capability *sec_cap;
- if (ctx == NULL) {
+ sec_ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(sa->portid);
+
+ if (sec_ctx == NULL) {
RTE_LOG(ERR, IPSEC,
"Ethernet device doesn't have security features registered\n");
return -1;
@@ -279,15 +337,15 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
sess_conf.userdata = (void *) sa;
- sa->sec_session = rte_security_session_create(ctx,
- &sess_conf, ipsec_ctx->session_pool);
+ sa->sec_session = rte_security_session_create(sec_ctx,
+ &sess_conf, skt_ctx->session_pool);
if (sa->sec_session == NULL) {
RTE_LOG(ERR, IPSEC,
"SEC Session init failed: err: %d\n", ret);
return -1;
}
- sec_cap = rte_security_capabilities_get(ctx);
+ sec_cap = rte_security_capabilities_get(sec_ctx);
if (sec_cap == NULL) {
RTE_LOG(ERR, IPSEC,
@@ -316,17 +374,8 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
}
sa->ol_flags = sec_cap->ol_flags;
- sa->security_ctx = ctx;
+ sa->security_ctx = sec_ctx;
}
- } else {
- sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->session_pool);
- rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
- sa->crypto_session, sa->xforms,
- ipsec_ctx->session_priv_pool);
-
- rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
- &cdev_info);
}
sa->cdev_id_qp = cdev_id_qp;
@@ -395,7 +444,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -414,7 +463,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->crypto_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
+ create_lookaside_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
@@ -429,12 +478,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
}
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
+ RTE_ASSERT(sa->sec_session != NULL);
ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
rte_security_set_pkt_metadata(
@@ -442,17 +486,11 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa->sec_session, pkts[i], NULL);
continue;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ RTE_ASSERT(sa->sec_session != NULL);
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
-
- if ((unlikely(sa->sec_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
rte_security_attach_session(&priv->cop,
sa->sec_session);
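
For readers skimming the diff, a minimal hedged sketch of the inline path that create_session() is being split into follows. It is illustrative only: sess_conf population, the rte_security_capabilities_get() checks and the rte_flow pattern/action programming shown in the hunks above are omitted, and the helper name here is hypothetical.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_security.h>
#include "ipsec.h"	/* struct socket_ctx, struct ipsec_sa (example app) */

/* Hedged sketch, not the patch itself: capability checks and the
 * NIC flow-rule setup are left out for brevity. */
static int
inline_session_sketch(struct socket_ctx *skt_ctx, struct ipsec_sa *sa)
{
	struct rte_security_session_conf sess_conf;
	struct rte_security_ctx *sec_ctx;

	memset(&sess_conf, 0, sizeof(sess_conf));

	/* Inline sessions hang off the ethdev security context ... */
	sec_ctx = (struct rte_security_ctx *)
			rte_eth_dev_get_sec_ctx(sa->portid);
	if (sec_ctx == NULL)
		return -1;

	/* ... and the session memory now comes from the per-socket pool,
	 * so the session (and its flow rule) can exist before the first
	 * inbound packet arrives. */
	sa->sec_session = rte_security_session_create(sec_ctx,
			&sess_conf, skt_ctx->session_pool);
	if (sa->sec_session == NULL)
		return -1;

	sa->security_ctx = sec_ctx;
	sa->ol_flags = 0;	/* the real code copies sec_cap->ol_flags */
	return 0;
}
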
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index e9272d7..41bac0b 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -312,6 +312,9 @@ void
enqueue_cop_burst(struct cdev_qp *cqp);
int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+
+int
+create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa);
#endif /* __IPSEC_H__ */
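
A hedged sketch of how the two prototypes above are intended to be used; variable names follow the surrounding hunks, while the loops, locking and error handling are omitted.

/* init path, sa_add_rules(): inline SAs get their session (and NIC
 * flow rule) before any traffic is received. */
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
    sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
	rc = create_inline_session(skt_ctx, sa);

/* datapath, ipsec_enqueue()/fill_ipsec_session(): lookaside SAs are
 * still created lazily, on first use. */
if (sa->sec_session == NULL && sa->crypto_session == NULL)
	rc = create_lookaside_session(ipsec_ctx, sa);
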
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 3f9cacb..868f1a2 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -95,22 +95,23 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
/* setup crypto section */
if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
if (sa->crypto_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->crypto.ses = sa->crypto_session;
/* setup session action type */
- } else {
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
if (sa->sec_session == NULL) {
- rc = create_session(ctx, sa);
+ rc = create_lookaside_session(ctx, sa);
if (rc != 0)
return rc;
}
ss->security.ses = sa->sec_session;
ss->security.ctx = sa->security_ctx;
ss->security.ol_flags = sa->ol_flags;
- }
+ } else
+ RTE_ASSERT(0);
rc = rte_ipsec_session_prepare(ss);
if (rc != 0)
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 8d47d1d..e8e55bf 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -777,11 +777,13 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, uint32_t inbound)
+ uint32_t nb_entries, uint32_t inbound,
+ struct socket_ctx *skt_ctx)
{
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
+ int32_t rc;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
@@ -834,6 +836,17 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->xforms = &sa_ctx->xf[idx].a;
+ if (sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ sa->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ rc = create_inline_session(skt_ctx, sa);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_inline_session() failed\n");
+ return -EINVAL;
+ }
+ }
print_one_sa_rule(sa, inbound);
} else {
switch (sa->cipher_algo) {
@@ -909,16 +922,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 0);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 1);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
@@ -1012,10 +1025,12 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
return 0;
}
-static void
+static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
const struct ipsec_sa *lsa)
{
+ int32_t rc = 0;
+
ss->sa = sa;
ss->type = lsa->type;
@@ -1028,6 +1043,17 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
ss->security.ctx = lsa->security_ctx;
ss->security.ol_flags = lsa->ol_flags;
}
+
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ if (ss->security.ses != NULL) {
+ rc = rte_ipsec_session_prepare(ss);
+ if (rc != 0)
+ memset(ss, 0, sizeof(*ss));
+ }
+ }
+
+ return rc;
}
/*
@@ -1062,8 +1088,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
if (rc < 0)
return rc;
- fill_ipsec_session(&lsa->ips, sa, lsa);
- return 0;
+ rc = fill_ipsec_session(&lsa->ips, sa, lsa);
+ return rc;
}
/*
@@ -1166,7 +1192,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
+ sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
@@ -1186,7 +1212,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
+ sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
--
2.7.4
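
As a summary of the sa.c change, a hedged sketch of the fill_ipsec_session() helper after the patch: because inline sessions now exist at init time, rte_ipsec_session_prepare() can run there as well. The sketch is simplified; the RTE_SECURITY_ACTION_TYPE_NONE and lookaside branches are omitted.

#include <string.h>
#include <rte_ipsec.h>
#include <rte_security.h>
#include "ipsec.h"	/* struct ipsec_sa (example app) */

/* Hedged sketch, not the patch itself. */
static int
fill_ipsec_session_sketch(struct rte_ipsec_session *ss,
		struct rte_ipsec_sa *sa, const struct ipsec_sa *lsa)
{
	int32_t rc = 0;

	ss->sa = sa;
	ss->type = lsa->type;
	ss->security.ses = lsa->sec_session;
	ss->security.ctx = lsa->security_ctx;
	ss->security.ol_flags = lsa->ol_flags;

	/* Inline sessions were already created in sa_add_rules(), so the
	 * librte_ipsec session can be prepared at init time too. */
	if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
	     ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
	    ss->security.ses != NULL) {
		rc = rte_ipsec_session_prepare(ss);
		if (rc != 0)
			memset(ss, 0, sizeof(*ss));
	}
	return rc;
}
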
Thread overview: 7+ messages
2019-07-10 10:57 [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Bernard Iremonger
2019-07-10 10:57 ` [dpdk-stable] [DPDK 2/2] examples/ipsec-secgw/test: fix inline test scripts Bernard Iremonger
2019-07-10 12:58 ` [dpdk-stable] [DPDK 1/2] examples/ipsec-secgw: fix 1st pkt dropped for inline crypto Thomas Monjalon
2019-07-10 13:32 ` Iremonger, Bernard