From: Xueming Li <xuemingl@mellanox.com>
To: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>,
Shahaf Shuler <shahafs@mellanox.com>
Cc: Xueming Li <xuemingl@mellanox.com>, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 04/15] net/mlx5: support Rx tunnel type identification
Date: Tue, 10 Apr 2018 21:34:04 +0800
Message-ID: <20180410133415.189905-5-xuemingl@mellanox.com>
In-Reply-To: <20180410133415.189905-1-xuemingl@mellanox.com>
This patch introduces tunnel type identification based on flow rules.
If flows of multiple tunnel types are created on the same queue,
RTE_PTYPE_TUNNEL_MASK is returned as the tunnel part of the packet
type, and bits in the flow mark can then be used as a tunnel type
identifier.
Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
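Note: below is a minimal usage sketch (not part of the patch) of how
an application could consume this on the Rx path, assuming it set
distinct mark values in its VXLAN and GRE flow rules. The function
app_tunnel_type() and the APP_MARK_* constants are hypothetical,
application-defined names; the mbuf fields and RTE_PTYPE_* constants
are standard DPDK API.

#include <rte_mbuf.h>

#define APP_MARK_VXLAN 1 /* hypothetical mark set by the VXLAN rule */
#define APP_MARK_GRE   2 /* hypothetical mark set by the GRE rule */

static uint32_t
app_tunnel_type(const struct rte_mbuf *m)
{
	uint32_t tunnel = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

	if (tunnel != RTE_PTYPE_TUNNEL_MASK)
		return tunnel; /* 0 (not tunneled) or one exact type. */
	/*
	 * Flows of several tunnel types share this queue, so the PMD
	 * can only report RTE_PTYPE_TUNNEL_MASK; fall back to the
	 * flow mark carried by the matched rule.
	 */
	if (m->ol_flags & PKT_RX_FDIR_ID) {
		switch (m->hash.fdir.hi) {
		case APP_MARK_VXLAN:
			return RTE_PTYPE_TUNNEL_VXLAN;
		case APP_MARK_GRE:
			return RTE_PTYPE_TUNNEL_GRE;
		}
	}
	return RTE_PTYPE_TUNNEL_MASK; /* Tunneled, type unknown. */
}
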
drivers/net/mlx5/mlx5_flow.c | 125 +++++++++++++++++++++++++++++-----
drivers/net/mlx5/mlx5_rxq.c | 11 ++-
drivers/net/mlx5/mlx5_rxtx.c | 12 ++--
drivers/net/mlx5/mlx5_rxtx.h | 9 ++-
drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 21 +++---
drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 17 +++--
6 files changed, 157 insertions(+), 38 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 870d05250..65d7a9b62 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -222,6 +222,7 @@ struct rte_flow {
struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t (*queues)[]; /**< Queues indexes to use. */
uint8_t rss_key[40]; /**< copy of the RSS key. */
+ uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
@@ -238,6 +239,19 @@ struct rte_flow {
(type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
(type) == RTE_FLOW_ITEM_TYPE_GRE)
+const uint32_t flow_ptype[] = {
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
+};
+
+#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
+
+const uint32_t ptype_ext[] = {
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_L4_UDP,
+ [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
+};
+
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
/** List of possible actions for these items. */
@@ -437,6 +451,7 @@ struct mlx5_flow_parse {
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
uint8_t rss_key[40]; /**< copy of the RSS key. */
enum hash_rxq_type layer; /**< Last pattern layer detected. */
+ uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
struct {
struct ibv_flow_attr *ibv_attr;
@@ -860,7 +875,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
if (ret)
goto exit_item_not_supported;
if (IS_TUNNEL(items->type)) {
- if (parser->inner) {
+ if (parser->tunnel) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
@@ -869,6 +884,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
return -rte_errno;
}
parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = flow_ptype[items->type];
}
if (parser->drop) {
parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
@@ -1165,6 +1181,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
}
/* Third step. Conversion parse, fill the specifications. */
parser->inner = 0;
+ parser->tunnel = 0;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
struct mlx5_flow_data data = {
.parser = parser,
@@ -1633,6 +1650,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
id.vni[0] = 0;
parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)];
if (spec) {
if (!mask)
mask = default_mask;
@@ -1686,6 +1704,7 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused,
};
parser->inner = IBV_FLOW_SPEC_INNER;
+ parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
mlx5_flow_create_copy(parser, &tunnel, size);
return 0;
}
@@ -1864,7 +1883,8 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
parser->rss_conf.key_len,
hash_fields,
parser->rss_conf.queue,
- parser->rss_conf.queue_num);
+ parser->rss_conf.queue_num,
+ parser->tunnel);
if (flow->frxq[i].hrxq)
continue;
flow->frxq[i].hrxq =
@@ -1873,7 +1893,8 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
parser->rss_conf.key_len,
hash_fields,
parser->rss_conf.queue,
- parser->rss_conf.queue_num);
+ parser->rss_conf.queue_num,
+ parser->tunnel);
if (!flow->frxq[i].hrxq) {
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1885,6 +1906,40 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
}
/**
+ * RXQ update after flow rule creation.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to the flow rule.
+ */
+static void
+mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ if (!dev->data->dev_started)
+ return;
+ for (i = 0; i != flow->rss_conf.queue_num; ++i) {
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
+ [(*flow->queues)[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+
+ rxq_data->mark |= flow->mark;
+ if (!tunnel)
+ continue;
+ rxq_ctrl->tunnel_types[tunnel] += 1;
+ if (rxq_data->tunnel != flow->tunnel)
+ rxq_data->tunnel = rxq_data->tunnel ?
+ RTE_PTYPE_TUNNEL_MASK :
+ flow->tunnel;
+ }
+}
+
+/**
* Complete flow rule creation.
*
* @param dev
@@ -1944,12 +1999,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
NULL, "internal error in flow creation");
goto error;
}
- for (i = 0; i != parser->rss_conf.queue_num; ++i) {
- struct mlx5_rxq_data *q =
- (*priv->rxqs)[parser->rss_conf.queue[i]];
-
- q->mark |= parser->mark;
- }
+ mlx5_flow_create_update_rxqs(dev, flow);
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -2022,6 +2072,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
}
/* Copy configuration. */
flow->queues = (uint16_t (*)[])(flow + 1);
+ flow->tunnel = parser.tunnel;
flow->rss_conf = (struct rte_flow_action_rss){
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
.level = 0,
@@ -2113,9 +2164,38 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
struct priv *priv = dev->data->dev_private;
unsigned int i;
- if (flow->drop || !flow->mark)
+ if (flow->drop || !dev->data->dev_started)
goto free;
- for (i = 0; i != flow->rss_conf.queue_num; ++i) {
+ for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) {
+ /* Update queue tunnel type. */
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
+ [(*flow->queues)[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+
+ RTE_ASSERT(rxq_ctrl->tunnel_types[tunnel] > 0);
+ rxq_ctrl->tunnel_types[tunnel] -= 1;
+ if (!rxq_ctrl->tunnel_types[tunnel]) {
+ /* Update tunnel type. */
+ uint8_t j;
+ uint8_t types = 0;
+ uint8_t last;
+
+ for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++)
+ if (rxq_ctrl->tunnel_types[j]) {
+ types += 1;
+ last = j;
+ }
+ /* Keep the same if more than one tunnel type is left. */
+ if (types == 1)
+ rxq_data->tunnel = ptype_ext[last];
+ else if (types == 0)
+ /* No tunnel type left. */
+ rxq_data->tunnel = 0;
+ }
+ }
+ for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) {
struct rte_flow *tmp;
int mark = 0;
@@ -2334,9 +2414,9 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
+ unsigned int i;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
- unsigned int i;
struct mlx5_ind_table_ibv *ind_tbl = NULL;
if (flow->drop) {
@@ -2382,6 +2462,16 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
(void *)flow);
}
+ /* Clean up Rx queue tunnel info. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *q = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(q, struct mlx5_rxq_ctrl, rxq);
+
+ memset((void *)rxq_ctrl->tunnel_types, 0,
+ sizeof(rxq_ctrl->tunnel_types));
+ q->tunnel = 0;
+ }
}
/**
@@ -2429,7 +2519,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
flow->rss_conf.key_len,
hash_rxq_init[i].hash_fields,
flow->rss_conf.queue,
- flow->rss_conf.queue_num);
+ flow->rss_conf.queue_num,
+ flow->tunnel);
if (flow->frxq[i].hrxq)
goto flow_create;
flow->frxq[i].hrxq =
@@ -2437,7 +2528,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
flow->rss_conf.key_len,
hash_rxq_init[i].hash_fields,
flow->rss_conf.queue,
- flow->rss_conf.queue_num);
+ flow->rss_conf.queue_num,
+ flow->tunnel);
if (!flow->frxq[i].hrxq) {
DRV_LOG(DEBUG,
"port %u flow %p cannot be applied",
@@ -2459,10 +2551,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DRV_LOG(DEBUG, "port %u flow %p applied",
dev->data->port_id, (void *)flow);
}
- if (!flow->mark)
- continue;
- for (i = 0; i != flow->rss_conf.queue_num; ++i)
- (*priv->rxqs)[flow->rss_conf.queue[i]]->mark = 1;
+ mlx5_flow_create_update_rxqs(dev, flow);
}
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1e4354ab3..351acfc0f 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1386,6 +1386,8 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
+ * @param tunnel
+ * Tunnel type.
*
* @return
* The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1394,7 +1396,7 @@ struct mlx5_hrxq *
mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n)
+ const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
@@ -1438,6 +1440,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
hrxq->qp = qp;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
+ hrxq->tunnel = tunnel;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
@@ -1466,6 +1469,8 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
+ * @param tunnel
+ * Tunnel type.
*
* @return
* An hash Rx queue on success.
@@ -1474,7 +1479,7 @@ struct mlx5_hrxq *
mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n)
+ const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
@@ -1489,6 +1494,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
continue;
if (hrxq->hash_fields != hash_fields)
continue;
+ if (hrxq->tunnel != tunnel)
+ continue;
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1f422c70b..d061dfc8a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -34,7 +34,7 @@
#include "mlx5_prm.h"
static __rte_always_inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -125,12 +125,14 @@ mlx5_set_ptype_table(void)
(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
/* Tunneled - L3 */
+ (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
@@ -1577,6 +1579,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/**
* Translate RX completion flags to packet type.
*
+ * @param[in] rxq
+ * Pointer to RX queue structure.
* @param[in] cqe
* Pointer to CQE.
*
@@ -1586,7 +1590,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet type for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
uint8_t idx;
uint8_t pinfo = cqe->pkt_info;
@@ -1601,7 +1605,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
* bit[7] = outer_l3_type
*/
idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
- return mlx5_ptype_table[idx];
+ return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}
/**
@@ -1833,7 +1837,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt = seg;
assert(len >= (rxq->crc_present << 2));
/* Update packet information. */
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
pkt->ol_flags = 0;
if (rss_hash_res && rxq->rss_hash) {
pkt->hash.rss = rss_hash_res;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index a702cb603..6866f6818 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -104,6 +104,7 @@ struct mlx5_rxq_data {
void *cq_uar; /* CQ user access region. */
uint32_t cqn; /* CQ number. */
uint8_t cq_arm_sn; /* CQ arm seq number. */
+ uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
/* Verbs Rx queue elements. */
@@ -125,6 +126,7 @@ struct mlx5_rxq_ctrl {
struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
+ uint32_t tunnel_types[16]; /* Tunnel type counter. */
unsigned int irq:1; /* Whether IRQ is enabled. */
uint16_t idx; /* Queue index. */
};
@@ -145,6 +147,7 @@ struct mlx5_hrxq {
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
+ uint32_t tunnel; /* Tunnel type. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
@@ -248,11 +251,13 @@ int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n);
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n);
+ const uint16_t *queues, uint32_t queues_n,
+ uint32_t tunnel);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index bbe1818ef..9f9136108 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -551,6 +551,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
uint64x2_t rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
if (rxq->mark) {
const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
@@ -583,14 +584,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
ptype = vshrn_n_u32(ptype_info, 10);
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
ptype = vorr_u16(ptype, op_err);
- pkts[0]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
- pkts[1]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
- pkts[2]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
- pkts[3]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
+ pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
+ pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
+ pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
+ pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = vandq_u32(ptype_info, ptype_ol_mask);
pinfo = vreinterpretq_u32_u8(
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index c088bcb51..d2492481d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -542,6 +542,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
const __m128i mbuf_init =
_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
__m128i rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
/* Extract pkt_info field. */
pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
@@ -595,10 +596,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
op_err = _mm_srli_epi16(op_err, 8);
ptype = _mm_or_si128(ptype, op_err);
- pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
- pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
- pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
- pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
+ pt_idx0 = _mm_extract_epi8(ptype, 0);
+ pt_idx1 = _mm_extract_epi8(ptype, 2);
+ pt_idx2 = _mm_extract_epi8(ptype, 4);
+ pt_idx3 = _mm_extract_epi8(ptype, 6);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
--
2.13.3
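
A side note on the branchless merge used in rxq_cq_to_pkt_type() and
in the vectorized Rx paths above: bit 6 of the ptype-table index is
the "tunneled" flag, so !!(idx & (1 << 6)) evaluates to 1 for tunneled
packets and 0 otherwise, and the multiplication ORs rxq->tunnel into
the packet type only in the tunneled case. A scalar sketch (the helper
name is hypothetical):

static inline uint32_t
merge_tunnel_ptype(const uint32_t *table, uint8_t idx, uint32_t tunnel)
{
	/* Same as: (idx & (1 << 6)) ? table[idx] | tunnel : table[idx] */
	return table[idx] | tunnel * !!(idx & (1 << 6));
}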