From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id EC3DCA04B1;
	Wed,  9 Sep 2020 02:28:53 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id E447A1C0D9;
	Wed,  9 Sep 2020 02:28:38 +0200 (CEST)
Received: from mga07.intel.com (mga07.intel.com [134.134.136.100])
 by dpdk.org (Postfix) with ESMTP id 14B531C0D9
 for <dev@dpdk.org>; Wed,  9 Sep 2020 02:28:36 +0200 (CEST)
IronPort-SDR: KcaBuzMKwl1LDUmZlGNU49cGnFppfeCEqAhsyxQIXmTDNyQ2/yEveLN+zniiQ8/RA/lHbxuq6S
 R8mSrnZEtz5w==
X-IronPort-AV: E=McAfee;i="6000,8403,9738"; a="222453015"
X-IronPort-AV: E=Sophos;i="5.76,407,1592895600"; d="scan'208";a="222453015"
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from fmsmga008.fm.intel.com ([10.253.24.58])
 by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 08 Sep 2020 17:28:36 -0700
IronPort-SDR: ieM1wcnL6ItDFC5fIEsbB3N8Tm5SuH2FgtFD5XykUkiaF+tiecGjtz35x17oceeYgvwCMPQsF2
 BSGeL8Suxfkw==
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.76,407,1592895600"; d="scan'208";a="286051378"
Received: from npg-dpdk-patrickfu-casc2.sh.intel.com ([10.67.119.92])
 by fmsmga008.fm.intel.com with ESMTP; 08 Sep 2020 17:28:33 -0700
From: Patrick Fu <patrick.fu@intel.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@intel.com, maxime.coquelin@redhat.com,
 bruce.richardson@intel.com, mm6021@att.com, zhihong.wang@intel.com,
 liang-min.wang@intel.com, konstantin.ananyev@intel.com,
 timothy.miskell@intel.com, cunming.liang@intel.com, patrick.fu@intel.com
Date: Wed,  9 Sep 2020 08:22:47 +0800
Message-Id: <20200909002247.864844-4-patrick.fu@intel.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20200909002247.864844-1-patrick.fu@intel.com>
References: <20200909002247.864844-1-patrick.fu@intel.com>
Subject: [dpdk-dev] [PATCH v1 3/3] lib/mirror: add flow based mirroring
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

This patch adds flow-based mirroring support to the mirror lib.
Flow-based mirroring is traffic mirroring governed by flow rules.
Applications may either use a customized callback to apply their
own flow rules, or use the MAC-matching rules implemented by the
mirror lib.

Signed-off-by: Liang-min Wang <liang-min.wang@intel.com>
Signed-off-by: Patrick Fu <patrick.fu@intel.com>
Signed-off-by: Timothy Miskell <timothy.miskell@intel.com>
---
 lib/librte_mirror/rte_mirror.c | 107 +++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)

diff --git a/lib/librte_mirror/rte_mirror.c b/lib/librte_mirror/rte_mirror.c
index d2c0d8eab..523ab37ff 100644
--- a/lib/librte_mirror/rte_mirror.c
+++ b/lib/librte_mirror/rte_mirror.c
@@ -196,6 +196,101 @@ mirror_pkt_update(struct rte_mbuf *pkt, uint16_t dst_vlan_id)
 	rte_mbuf_refcnt_update(pkt, 1);
 }
 
+static inline uint16_t
+mirror_flow_cb(uint16_t qidx, struct rte_mbuf **pkts, uint16_t nb_pkts,
+		void *user_params, bool is_custom, uint8_t mac_offset)
+{
+	struct rte_mirror_param *data = user_params;
+	uint16_t i, dst_qidx, match_count = 0;
+	uint16_t pkt_trans;
+	uint16_t dst_port_id = data->dst_port_id;
+	uint16_t dst_vlan_id = data->dst_vlan_id;
+	uint64_t target_addr = *((uint64_t *)data->extra_data);
+	struct rte_mbuf **pkt_buf = &data->pkt_buf[qidx*data->max_burst_size];
+	uint64_t *mac_addr = NULL; /* NULL, not 0, for pointer initialization */
+
+	if (nb_pkts == 0)
+		return 0;
+
+	if (nb_pkts > data->max_burst_size) {
+		MIRROR_LOG(ERR, "Per-flow batch size, %d, exceeds "
+			"maximum limit, %d.\n", nb_pkts, data->max_burst_size);
+		return nb_pkts; /* can't return -EINVAL from a uint16_t count: it wraps to 65514 and over-reads pkts[]; skip mirroring, pass burst through */
+	}
+
+	if (unlikely(is_custom)) { /* application-supplied scan decides what to mirror */
+		for (i = 0; i < nb_pkts; i++) {
+			if (data->custom_scan(pkts[i], user_params)) {
+				pkt_buf[match_count] = pkts[i];
+				mirror_pkt_update(pkt_buf[match_count],
+						dst_vlan_id); /* bumps refcnt so tx below doesn't steal the mbuf */
+				match_count++;
+			}
+		}
+	} else { /* built-in MAC match at the caller-chosen header offset */
+		for (i = 0; i < nb_pkts; i++) {
+			mac_addr =
+				rte_pktmbuf_mtod_offset(pkts[i],
+						uint64_t *, mac_offset); /* NOTE(review): 8-byte load of a 6-byte MAC; may be unaligned — confirm is_mac_addr_match masks the extra bytes */
+			if (is_mac_addr_match(target_addr, (*mac_addr))) {
+				pkt_buf[match_count] = pkts[i];
+				mirror_pkt_update(pkt_buf[match_count],
+						dst_vlan_id);
+				match_count++;
+			}
+		}
+	}
+
+	dst_qidx = (data->n_dst_queue > qidx) ? qidx : (data->n_dst_queue - 1); /* fold excess source queues onto last dst queue */
+
+	rte_spinlock_lock(&data->locks[dst_qidx]); /* dst queue may be shared by several source queues */
+	pkt_trans = rte_eth_tx_burst(dst_port_id, dst_qidx,
+			pkt_buf, match_count);
+	rte_spinlock_unlock(&data->locks[dst_qidx]);
+
+	for (i = 0; i < match_count; i++)
+		pkt_buf[i]->ol_flags &= ~VLAN_INSERT_FLAG; /* safe post-tx: refcnt was raised in mirror_pkt_update */
+
+	while (unlikely(pkt_trans < match_count)) { /* drop the extra reference on un-transmitted mirrors */
+		rte_pktmbuf_free(pkt_buf[pkt_trans]);
+		pkt_trans++;
+	}
+
+	return nb_pkts; /* all original packets proceed unmodified */
+}
+
+static uint16_t
+mirror_rx_flow_custom_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+	struct rte_mbuf **pkts, uint16_t nb_pkts,
+	uint16_t maxi_pkts __rte_unused, void *user_params)
+{
+	return mirror_flow_cb(qidx, pkts, nb_pkts, user_params, true, 0); /* Rx hook: application's custom_scan selects packets to mirror */
+}
+
+static uint16_t
+mirror_tx_flow_custom_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+	struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
+{
+	return mirror_flow_cb(qidx, pkts, nb_pkts, user_params, true, 0); /* Tx hook: application's custom_scan selects packets to mirror */
+}
+
+static uint16_t
+mirror_rx_flow_mac_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+	struct rte_mbuf **pkts, uint16_t nb_pkts,
+	uint16_t maxi_pkts __rte_unused, void *user_params)
+{
+	return mirror_flow_cb(qidx, pkts, nb_pkts,
+			user_params, false, DST_MAC_OFFSET); /* Rx hook: built-in matcher on destination MAC */
+}
+
+static uint16_t
+mirror_tx_flow_mac_cb(uint16_t port_id __rte_unused, uint16_t qidx,
+	struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
+{
+	return mirror_flow_cb(qidx, pkts, nb_pkts,
+			user_params, false, SRC_MAC_OFFSET); /* Tx hook: built-in matcher on source MAC */
+}
+
 static inline uint16_t
 mirror_port_cb(uint16_t qidx, struct rte_mbuf **pkts,
 	uint16_t nb_pkts, void *user_params)
@@ -278,6 +373,18 @@ rte_mirror_offload_register(uint16_t src_port,
 		else
 			rx_fn = mirror_rx_port_cb;
 		break;
+	case rte_mirror_type_flow_mac:
+		if (tx_cb)
+			tx_fn = mirror_tx_flow_mac_cb;
+		else
+			rx_fn = mirror_rx_flow_mac_cb;
+		break;
+	case rte_mirror_type_flow_custom:
+		if (tx_cb)
+			tx_fn = mirror_tx_flow_custom_cb;
+		else
+			rx_fn = mirror_rx_flow_custom_cb;
+		break;
 	default:
 		MIRROR_LOG(ERR, "Un-supported mirror offloading type!!!\n");
 		return -ENOTSUP;
-- 
2.18.4