From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support
Date: Fri, 13 Sep 2024 11:29:59 +0530 [thread overview]
Message-ID: <20240913055959.3246917-44-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240913055959.3246917-1-vanshika.shukla@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
Support multiple extractions as well as hardware descriptions
instead of hard-coded values.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.h | 1 +
drivers/net/dpaa2/dpaa2_mux.c | 395 ++++++++++++++++-----------
drivers/net/dpaa2/dpaa2_parse_dump.h | 2 +
drivers/net/dpaa2/rte_pmd_dpaa2.h | 8 +-
4 files changed, 247 insertions(+), 159 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fd6bad7f74..fd3119247a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -198,6 +198,7 @@ enum dpaa2_rx_faf_offset {
FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8,
FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8,
FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8,
+ FAF_IP_FRAG_FRAM = 50 + DPAA2_FAFE_PSR_SIZE * 8,
FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8,
FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8,
FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8,
diff --git a/drivers/net/dpaa2/dpaa2_mux.c b/drivers/net/dpaa2/dpaa2_mux.c
index 5c37701939..79a1c7f981 100644
--- a/drivers/net/dpaa2/dpaa2_mux.c
+++ b/drivers/net/dpaa2/dpaa2_mux.c
@@ -32,8 +32,9 @@ struct dpaa2_dpdmux_dev {
uint8_t num_ifs; /* Number of interfaces in DPDMUX */
};
-struct rte_flow {
- struct dpdmux_rule_cfg rule;
+#define DPAA2_MUX_FLOW_MAX_RULE_NUM 8
+struct dpaa2_mux_flow {
+ struct dpdmux_rule_cfg rule[DPAA2_MUX_FLOW_MAX_RULE_NUM];
};
TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
@@ -53,204 +54,287 @@ static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
return dpdmux_dev;
}
-struct rte_flow *
+int
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
- struct rte_flow_item *pattern[],
- struct rte_flow_action *actions[])
+ struct rte_flow_item pattern[],
+ struct rte_flow_action actions[])
{
struct dpaa2_dpdmux_dev *dpdmux_dev;
+ static struct dpkg_profile_cfg s_kg_cfg;
struct dpkg_profile_cfg kg_cfg;
const struct rte_flow_action_vf *vf_conf;
struct dpdmux_cls_action dpdmux_action;
- struct rte_flow *flow = NULL;
- void *key_iova, *mask_iova, *key_cfg_iova = NULL;
+ uint8_t *key_va = NULL, *mask_va = NULL;
+ void *key_cfg_va = NULL;
+ uint64_t key_iova, mask_iova, key_cfg_iova;
uint8_t key_size = 0;
- int ret;
- static int i;
+ int ret = 0, loop = 0;
+ static int s_i;
+ struct dpkg_extract *extract;
+ struct dpdmux_rule_cfg rule;
- if (!pattern || !actions || !pattern[0] || !actions[0])
- return NULL;
+ memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
/* Find the DPDMUX from dpdmux_id in our list */
dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
if (!dpdmux_dev) {
DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
- return NULL;
+ ret = -ENODEV;
+ goto creation_error;
}
- key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
- RTE_CACHE_LINE_SIZE);
- if (!key_cfg_iova) {
- DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
- return NULL;
+ key_cfg_va = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (!key_cfg_va) {
+ DPAA2_PMD_ERR("Unable to allocate key configure buffer");
+ ret = -ENOMEM;
+ goto creation_error;
+ }
+
+ key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_va,
+ DIST_PARAM_IOVA_SIZE);
+ if (key_cfg_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+ __func__, key_cfg_va);
+ ret = -ENOBUFS;
+ goto creation_error;
}
- flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
- (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
- if (!flow) {
- DPAA2_PMD_ERR(
- "Memory allocation failure for rule configuration\n");
+
+ key_va = rte_zmalloc(NULL, (2 * DIST_PARAM_IOVA_SIZE),
+ RTE_CACHE_LINE_SIZE);
+ if (!key_va) {
+ DPAA2_PMD_ERR("Unable to allocate flow dist parameter");
+ ret = -ENOMEM;
goto creation_error;
}
- key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
- mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
+
+ key_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_va,
+ (2 * DIST_PARAM_IOVA_SIZE));
+ if (key_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU mapping for address(%p)",
+ __func__, key_va);
+ ret = -ENOBUFS;
+ goto creation_error;
+ }
+
+ mask_va = key_va + DIST_PARAM_IOVA_SIZE;
+ mask_iova = key_iova + DIST_PARAM_IOVA_SIZE;
/* Currently taking only IP protocol as an extract type.
- * This can be extended to other fields using pattern->type.
+ * This can be extended to other fields using pattern->type.
*/
memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
- switch (pattern[0]->type) {
- case RTE_FLOW_ITEM_TYPE_IPV4:
- {
- const struct rte_flow_item_ipv4 *spec;
-
- kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
- kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
- kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
- kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
- kg_cfg.num_extracts = 1;
-
- spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
- memcpy(key_iova, (const void *)(&spec->hdr.next_proto_id),
- sizeof(uint8_t));
- memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t));
- key_size = sizeof(uint8_t);
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_VLAN:
- {
- const struct rte_flow_item_vlan *spec;
-
- kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
- kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
- kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
- kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FROM_FIELD;
- kg_cfg.extracts[0].extract.from_hdr.offset = 1;
- kg_cfg.extracts[0].extract.from_hdr.size = 1;
- kg_cfg.num_extracts = 1;
-
- spec = (const struct rte_flow_item_vlan *)pattern[0]->spec;
- memcpy((void *)key_iova, (const void *)(&spec->hdr.vlan_tci),
- sizeof(uint16_t));
- memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
- key_size = sizeof(uint16_t);
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_UDP:
- {
- const struct rte_flow_item_udp *spec;
- uint16_t udp_dst_port;
-
- kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP;
- kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
- kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
- kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
- kg_cfg.num_extracts = 1;
-
- spec = (const struct rte_flow_item_udp *)pattern[0]->spec;
- udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port);
- memcpy((void *)key_iova, (const void *)&udp_dst_port,
- sizeof(rte_be16_t));
- memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
- key_size = sizeof(uint16_t);
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_ETH:
- {
- const struct rte_flow_item_eth *spec;
- uint16_t eth_type;
-
- kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH;
- kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE;
- kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
- kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
- kg_cfg.num_extracts = 1;
-
- spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
- eth_type = rte_constant_bswap16(spec->hdr.ether_type);
- memcpy((void *)key_iova, (const void *)ð_type,
- sizeof(rte_be16_t));
- memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
- key_size = sizeof(uint16_t);
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_RAW:
- {
- const struct rte_flow_item_raw *spec;
-
- spec = (const struct rte_flow_item_raw *)pattern[0]->spec;
- kg_cfg.extracts[0].extract.from_data.offset = spec->offset;
- kg_cfg.extracts[0].extract.from_data.size = spec->length;
- kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
- kg_cfg.num_extracts = 1;
- memcpy((void *)key_iova, (const void *)spec->pattern,
- spec->length);
- memcpy(mask_iova, pattern[0]->mask, spec->length);
-
- key_size = spec->length;
- }
- break;
+ while (pattern[loop].type != RTE_FLOW_ITEM_TYPE_END) {
+ if (kg_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Too many extracts(%d)",
+ kg_cfg.num_extracts);
+ ret = -ENOTSUP;
+ goto creation_error;
+ }
+ switch (pattern[loop].type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ {
+ const struct rte_flow_item_ipv4 *spec;
+ const struct rte_flow_item_ipv4 *mask;
+
+ extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+ extract->type = DPKG_EXTRACT_FROM_HDR;
+ extract->extract.from_hdr.prot = NET_PROT_IP;
+ extract->extract.from_hdr.field = NH_FLD_IP_PROTO;
+ extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+
+ kg_cfg.num_extracts++;
+
+ spec = pattern[loop].spec;
+ mask = pattern[loop].mask;
+ rte_memcpy(&key_va[key_size],
+ &spec->hdr.next_proto_id, sizeof(uint8_t));
+ if (mask) {
+ rte_memcpy(&mask_va[key_size],
+ &mask->hdr.next_proto_id,
+ sizeof(uint8_t));
+ } else {
+ mask_va[key_size] = 0xff;
+ }
+ key_size += sizeof(uint8_t);
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ {
+ const struct rte_flow_item_vlan *spec;
+ const struct rte_flow_item_vlan *mask;
+
+ extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+ extract->type = DPKG_EXTRACT_FROM_HDR;
+ extract->extract.from_hdr.prot = NET_PROT_VLAN;
+ extract->extract.from_hdr.field = NH_FLD_VLAN_TCI;
+ extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+
+ kg_cfg.num_extracts++;
+
+ spec = pattern[loop].spec;
+ mask = pattern[loop].mask;
+ rte_memcpy(&key_va[key_size],
+ &spec->tci, sizeof(uint16_t));
+ if (mask) {
+ rte_memcpy(&mask_va[key_size],
+ &mask->tci, sizeof(uint16_t));
+ } else {
+ memset(&mask_va[key_size], 0xff,
+ sizeof(rte_be16_t));
+ }
+ key_size += sizeof(uint16_t);
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ {
+ const struct rte_flow_item_udp *spec;
+ const struct rte_flow_item_udp *mask;
+
+ extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+ extract->type = DPKG_EXTRACT_FROM_HDR;
+ extract->extract.from_hdr.prot = NET_PROT_UDP;
+ extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+ extract->extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
+ kg_cfg.num_extracts++;
+
+ spec = pattern[loop].spec;
+ mask = pattern[loop].mask;
+ rte_memcpy(&key_va[key_size],
+ &spec->hdr.dst_port, sizeof(rte_be16_t));
+ if (mask) {
+ rte_memcpy(&mask_va[key_size],
+ &mask->hdr.dst_port,
+ sizeof(rte_be16_t));
+ } else {
+ memset(&mask_va[key_size], 0xff,
+ sizeof(rte_be16_t));
+ }
+ key_size += sizeof(rte_be16_t);
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ {
+ const struct rte_flow_item_eth *spec;
+ const struct rte_flow_item_eth *mask;
+
+ extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+ extract->type = DPKG_EXTRACT_FROM_HDR;
+ extract->extract.from_hdr.prot = NET_PROT_ETH;
+ extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+ extract->extract.from_hdr.field = NH_FLD_ETH_TYPE;
+ kg_cfg.num_extracts++;
+
+ spec = pattern[loop].spec;
+ mask = pattern[loop].mask;
+ rte_memcpy(&key_va[key_size],
+ &spec->type, sizeof(rte_be16_t));
+ if (mask) {
+ rte_memcpy(&mask_va[key_size],
+ &mask->type, sizeof(rte_be16_t));
+ } else {
+ memset(&mask_va[key_size], 0xff,
+ sizeof(rte_be16_t));
+ }
+ key_size += sizeof(rte_be16_t);
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ {
+ const struct rte_flow_item_raw *spec;
+ const struct rte_flow_item_raw *mask;
+
+ spec = pattern[loop].spec;
+ mask = pattern[loop].mask;
+ extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+ extract->type = DPKG_EXTRACT_FROM_DATA;
+ extract->extract.from_data.offset = spec->offset;
+ extract->extract.from_data.size = spec->length;
+ kg_cfg.num_extracts++;
+
+ rte_memcpy(&key_va[key_size],
+ spec->pattern, spec->length);
+ if (mask && mask->pattern) {
+ rte_memcpy(&mask_va[key_size],
+ mask->pattern, spec->length);
+ } else {
+ memset(&mask_va[key_size], 0xff, spec->length);
+ }
+
+ key_size += spec->length;
+ }
+ break;
- default:
- DPAA2_PMD_ERR("Not supported pattern type: %d",
- pattern[0]->type);
- goto creation_error;
+ default:
+ DPAA2_PMD_ERR("Not supported pattern[%d] type: %d",
+ loop, pattern[loop].type);
+ ret = -ENOTSUP;
+ goto creation_error;
+ }
+ loop++;
}
- ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
+ ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_va);
if (ret) {
DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
goto creation_error;
}
- /* Multiple rules with same DPKG extracts (kg_cfg.extracts) like same
- * offset and length values in raw is supported right now. Different
- * values of kg_cfg may not work.
- */
- if (i == 0) {
- ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
- dpdmux_dev->token,
- (uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
+ if (!s_i) {
+ ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux,
+ CMD_PRI_LOW, dpdmux_dev->token, key_cfg_iova);
if (ret) {
DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)",
- ret);
+ ret);
+ goto creation_error;
+ }
+ rte_memcpy(&s_kg_cfg, &kg_cfg, sizeof(struct dpkg_profile_cfg));
+ } else {
+ if (memcmp(&s_kg_cfg, &kg_cfg,
+ sizeof(struct dpkg_profile_cfg))) {
+ DPAA2_PMD_ERR("%s: Single flow support only.",
+ __func__);
+ ret = -ENOTSUP;
goto creation_error;
}
}
- /* As now our key extract parameters are set, let us configure
- * the rule.
- */
- flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
- flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
- flow->rule.key_size = key_size;
- flow->rule.entry_index = i++;
- vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
+ vf_conf = actions[0].conf;
if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
- DPAA2_PMD_ERR("Invalid destination id\n");
+ DPAA2_PMD_ERR("Invalid destination id(%d)", vf_conf->id);
goto creation_error;
}
dpdmux_action.dest_if = vf_conf->id;
- ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
- dpdmux_dev->token, &flow->rule,
- &dpdmux_action);
+ rule.key_iova = key_iova;
+ rule.mask_iova = mask_iova;
+ rule.key_size = key_size;
+ rule.entry_index = s_i;
+ s_i++;
+
+ /* As now our key extract parameters are set, let us configure
+ * the rule.
+ */
+ ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux,
+ CMD_PRI_LOW, dpdmux_dev->token,
+ &rule, &dpdmux_action);
if (ret) {
- DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
- ret);
+ DPAA2_PMD_ERR("Add classification entry failed:err(%d)", ret);
goto creation_error;
}
- return flow;
-
creation_error:
- rte_free((void *)key_cfg_iova);
- rte_free((void *)flow);
- return NULL;
+ if (key_cfg_va)
+ rte_free(key_cfg_va);
+ if (key_va)
+ rte_free(key_va);
+
+ return ret;
}
int
@@ -407,10 +491,11 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
PMD_INIT_FUNC_TRACE();
/* Allocate DPAA2 dpdmux handle */
- dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
+ dpdmux_dev = rte_zmalloc(NULL,
+ sizeof(struct dpaa2_dpdmux_dev), RTE_CACHE_LINE_SIZE);
if (!dpdmux_dev) {
DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
- return -1;
+ return -ENOMEM;
}
/* Open the dpdmux object */
diff --git a/drivers/net/dpaa2/dpaa2_parse_dump.h b/drivers/net/dpaa2/dpaa2_parse_dump.h
index f1cdc003de..78fd3b768c 100644
--- a/drivers/net/dpaa2/dpaa2_parse_dump.h
+++ b/drivers/net/dpaa2/dpaa2_parse_dump.h
@@ -105,6 +105,8 @@ dpaa2_print_faf(struct dpaa2_fapr_array *fapr)
faf_bits[i].name = "IPv4 1 Present";
else if (i == FAF_IPV6_FRAM)
faf_bits[i].name = "IPv6 1 Present";
+ else if (i == FAF_IP_FRAG_FRAM)
+ faf_bits[i].name = "IP fragment Present";
else if (i == FAF_UDP_FRAM)
faf_bits[i].name = "UDP Present";
else if (i == FAF_TCP_FRAM)
diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2.h b/drivers/net/dpaa2/rte_pmd_dpaa2.h
index f93af1c65f..237c3cd6e7 100644
--- a/drivers/net/dpaa2/rte_pmd_dpaa2.h
+++ b/drivers/net/dpaa2/rte_pmd_dpaa2.h
@@ -26,12 +26,12 @@
* Associated actions.
*
* @return
- * A valid handle in case of success, NULL otherwise.
+ * 0 in case of success, otherwise failure.
*/
-struct rte_flow *
+int
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
- struct rte_flow_item *pattern[],
- struct rte_flow_action *actions[]);
+ struct rte_flow_item pattern[],
+ struct rte_flow_action actions[]);
int
rte_pmd_dpaa2_mux_flow_destroy(uint32_t dpdmux_id,
uint16_t entry_index);
--
2.25.1
next prev parent reply other threads:[~2024-09-13 6:06 UTC|newest]
Thread overview: 229+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-13 5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13 5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13 5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13 5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13 5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13 5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13 5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13 5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13 5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13 5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13 5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13 5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13 5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13 5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13 5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13 5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13 5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13 5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13 5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13 5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13 5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13 5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13 5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13 5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13 5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13 5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13 5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13 5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13 5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13 5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13 5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13 5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13 5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13 5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13 5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13 5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13 5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13 5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13 5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13 5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13 5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13 5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13 5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13 5:59 ` vanshika.shukla [this message]
2024-09-18 7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18 7:50 ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00 ` [v3 00/43] DPAA2 specific patches vanshika.shukla
2024-10-14 12:00 ` [v3 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00 ` [v3 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-14 12:00 ` [v3 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-14 12:00 ` [v3 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-14 12:00 ` [v3 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-14 12:00 ` [v3 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-14 12:00 ` [v3 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-14 12:00 ` [v3 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-14 12:00 ` [v3 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-14 12:00 ` [v3 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-14 12:00 ` [v3 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-14 12:00 ` [v3 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-14 12:00 ` [v3 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-15 2:27 ` Stephen Hemminger
2024-10-14 12:00 ` [v3 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-15 2:29 ` Stephen Hemminger
2024-10-14 12:00 ` [v3 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-14 12:00 ` [v3 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-15 2:31 ` Stephen Hemminger
2024-10-14 12:01 ` [v3 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-14 12:01 ` [v3 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-14 12:01 ` [v3 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-14 12:01 ` [v3 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-10-14 12:01 ` [v3 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-14 12:01 ` [v3 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-14 12:01 ` [v3 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-14 12:01 ` [v3 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-10-14 12:01 ` [v3 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-14 12:01 ` [v3 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-14 12:01 ` [v3 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-14 12:01 ` [v3 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-14 12:01 ` [v3 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-14 12:01 ` [v3 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-14 12:01 ` [v3 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-14 12:01 ` [v3 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-14 12:01 ` [v3 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-14 12:01 ` [v3 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-14 12:01 ` [v3 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-14 12:01 ` [v3 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-10-14 12:01 ` [v3 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-14 12:01 ` [v3 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-14 12:01 ` [v3 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-14 12:01 ` [v3 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-14 12:01 ` [v3 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-14 12:01 ` [v3 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-14 12:01 ` [v3 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-15 2:32 ` Stephen Hemminger
2024-10-22 19:12 ` [v4 00/42] DPAA2 specific patches vanshika.shukla
2024-10-22 19:12 ` [v4 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-22 19:12 ` [v4 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-22 19:12 ` [v4 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-22 19:12 ` [v4 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-22 19:12 ` [v4 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-22 19:12 ` [v4 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-22 19:12 ` [v4 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-22 19:12 ` [v4 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-22 19:12 ` [v4 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-22 19:12 ` [v4 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-22 19:12 ` [v4 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-22 19:12 ` [v4 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-22 19:12 ` [v4 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-22 19:12 ` [v4 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-22 19:12 ` [v4 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-22 19:12 ` [v4 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 1:02 ` Stephen Hemminger
2024-10-22 19:12 ` [v4 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-22 19:12 ` [v4 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-22 19:12 ` [v4 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-22 19:12 ` [v4 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-22 19:12 ` [v4 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-22 19:12 ` [v4 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-22 19:12 ` [v4 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-10-23 0:52 ` Stephen Hemminger
2024-10-23 12:04 ` [EXT] " Vanshika Shukla
2024-10-22 19:12 ` [v4 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-22 19:12 ` [v4 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-22 19:12 ` [v4 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-22 19:12 ` [v4 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-22 19:12 ` [v4 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-22 19:12 ` [v4 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-22 19:12 ` [v4 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-22 19:12 ` [v4 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-22 19:12 ` [v4 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-22 19:12 ` [v4 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-22 19:12 ` [v4 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-22 19:12 ` [v4 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-22 19:12 ` [v4 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-22 19:12 ` [v4 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-22 19:12 ` [v4 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-22 19:12 ` [v4 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-22 19:12 ` [v4 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-22 19:12 ` [v4 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-22 19:12 ` [v4 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-23 11:59 ` [v5 00/42] DPAA2 specific patches vanshika.shukla
2024-10-23 11:59 ` [v5 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-23 11:59 ` [v5 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-23 11:59 ` [v5 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-23 11:59 ` [v5 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-23 11:59 ` [v5 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-23 11:59 ` [v5 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-23 11:59 ` [v5 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-23 11:59 ` [v5 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-23 11:59 ` [v5 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-23 11:59 ` [v5 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-23 11:59 ` [v5 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-23 11:59 ` [v5 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-23 11:59 ` [v5 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-23 11:59 ` [v5 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-11-09 17:07 ` Thomas Monjalon
2024-10-23 11:59 ` [v5 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-23 11:59 ` [v5 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 11:59 ` [v5 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-23 11:59 ` [v5 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-23 11:59 ` [v5 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-23 11:59 ` [v5 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-23 11:59 ` [v5 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-23 11:59 ` [v5 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-23 11:59 ` [v5 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-11-09 19:01 ` Thomas Monjalon
2024-10-23 11:59 ` [v5 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-23 11:59 ` [v5 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-23 11:59 ` [v5 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-23 11:59 ` [v5 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-23 11:59 ` [v5 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-23 11:59 ` [v5 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-23 11:59 ` [v5 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-23 11:59 ` [v5 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-23 11:59 ` [v5 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-23 11:59 ` [v5 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-23 11:59 ` [v5 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-23 11:59 ` [v5 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-23 11:59 ` [v5 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-23 11:59 ` [v5 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-23 11:59 ` [v5 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-23 11:59 ` [v5 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-23 11:59 ` [v5 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-23 11:59 ` [v5 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-23 11:59 ` [v5 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-11-07 11:24 ` [v5 00/42] DPAA2 specific patches Hemant Agrawal
2024-09-18 7:50 ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18 7:50 ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18 7:50 ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18 7:50 ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18 7:50 ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18 7:50 ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18 7:50 ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18 7:50 ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18 7:50 ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18 7:50 ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18 7:50 ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18 7:50 ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18 7:50 ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18 7:50 ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18 7:50 ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18 7:50 ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18 7:50 ` [v2 18/43] bus/fslmc: create dpaa2 device with its object vanshika.shukla
2024-09-18 7:50 ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18 7:50 ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18 7:50 ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18 7:50 ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18 7:50 ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18 7:50 ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18 7:50 ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18 7:50 ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18 7:50 ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18 7:50 ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18 7:50 ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18 7:50 ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18 7:50 ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18 7:50 ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18 7:50 ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18 7:50 ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18 7:50 ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18 7:50 ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18 7:50 ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18 7:50 ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18 7:50 ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18 7:50 ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18 7:50 ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18 7:50 ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18 7:50 ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-10 2:54 ` [v2 00/43] DPAA2 specific patches Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240913055959.3246917-44-vanshika.shukla@nxp.com \
--to=vanshika.shukla@nxp.com \
--cc=dev@dpdk.org \
--cc=hemant.agrawal@nxp.com \
--cc=jun.yang@nxp.com \
--cc=sachin.saxena@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).