From: "Július Milan" <jmilan.dev@gmail.com>
To: dev@dpdk.org, jgrajcia@cisco.com, ferruh.yigit@intel.com
Subject: [dpdk-dev] [PATCH v3 1/2] net/memif: enable loopback
Date: Mon, 9 Mar 2020 15:22:08 +0100
Message-ID: <20200309142208.GA9634@vbox>

With this patch it is possible to connect two DPDK memif interfaces in
loopback, i.e. when they have the same id but different roles, for example:
  "--vdev=net_memif0,role=master,id=0"
  "--vdev=net_memif1,role=slave,id=0"

Signed-off-by: Július Milan <jmilan.dev@gmail.com>
---
v2: changed the log format specifier for size from %ld to %zu, to fix
    the 32-bit build

v3: changed the log format specifier for size from %zu to %zd, as size
    (the signed ssize_t returned by recvmsg()) can be negative

 drivers/net/memif/memif_socket.c  | 30 ++++++++++++++----------------
 drivers/net/memif/rte_eth_memif.c |  1 +
 drivers/net/memif/rte_eth_memif.h |  1 +
 3 files changed, 16 insertions(+), 16 deletions(-)
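
A minimal way to exercise the loopback is to pass both vdevs to a
single application instance, e.g. testpmd (the binary path and core
list below are illustrative, not part of this patch):

  ./build/app/testpmd -l 0-1 \
    --vdev=net_memif0,role=master,id=0 \
    --vdev=net_memif1,role=slave,id=0 \
    -- -i

With matching id and opposite roles the two interfaces connect to each
other over the memif control socket, so traffic transmitted on one
port is received on the other.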

diff --git a/drivers/net/memif/memif_socket.c b/drivers/net/memif/memif_socket.c
index ad5e30b96..553f8b004 100644
--- a/drivers/net/memif/memif_socket.c
+++ b/drivers/net/memif/memif_socket.c
@@ -203,7 +203,7 @@ memif_msg_receive_init(struct memif_control_channel *cc, memif_msg_t *msg)
 		dev = elt->dev;
 		pmd = dev->data->dev_private;
 		if (((pmd->flags & ETH_MEMIF_FLAG_DISABLED) == 0) &&
-		    pmd->id == i->id) {
+		    (pmd->id == i->id) && (pmd->role == MEMIF_ROLE_MASTER)) {
 			/* assign control channel to device */
 			cc->dev = dev;
 			pmd->cc = cc;
@@ -528,6 +528,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
 	pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED;
 
+	rte_spinlock_lock(&pmd->cc_lock);
 	if (pmd->cc != NULL) {
 		/* Clear control message queue (except disconnect message if any). */
 		for (elt = TAILQ_FIRST(&pmd->cc->msg_queue); elt != NULL; elt = next) {
@@ -570,6 +571,7 @@ memif_disconnect(struct rte_eth_dev *dev)
 					"Failed to unregister control channel callback.");
 		}
 	}
+	rte_spinlock_unlock(&pmd->cc_lock);
 
 	/* unconfig interrupts */
 	for (i = 0; i < pmd->cfg.num_s2m_rings; i++) {
@@ -612,7 +614,8 @@ memif_disconnect(struct rte_eth_dev *dev)
 	/* reset connection configuration */
 	memset(&pmd->run, 0, sizeof(pmd->run));
 
-	MIF_LOG(DEBUG, "Disconnected.");
+	MIF_LOG(DEBUG, "Disconnected, id: %d, role: %s.", pmd->id,
+		(pmd->role == MEMIF_ROLE_MASTER) ? "master" : "slave");
 }
 
 static int
@@ -642,8 +645,12 @@ memif_msg_receive(struct memif_control_channel *cc)
 
 	size = recvmsg(cc->intr_handle.fd, &mh, 0);
 	if (size != sizeof(memif_msg_t)) {
-		MIF_LOG(DEBUG, "Invalid message size.");
-		memif_msg_enq_disconnect(cc, "Invalid message size", 0);
+		MIF_LOG(DEBUG, "Invalid message size = %zd", size);
+		if (size > 0)
+			/* 0 means end-of-file, negative size means error,
+			 * don't send further disconnect message in such cases.
+			 */
+			memif_msg_enq_disconnect(cc, "Invalid message size", 0);
 		return -1;
 	}
 	MIF_LOG(DEBUG, "Received msg type: %u.", msg.type);
@@ -965,20 +972,11 @@ memif_socket_init(struct rte_eth_dev *dev, const char *socket_filename)
 	}
 	pmd->socket_filename = socket->filename;
 
-	if (socket->listener != 0 && pmd->role == MEMIF_ROLE_SLAVE) {
-		MIF_LOG(ERR, "Socket is a listener.");
-		return -1;
-	} else if ((socket->listener == 0) && (pmd->role == MEMIF_ROLE_MASTER)) {
-		MIF_LOG(ERR, "Socket is not a listener.");
-		return -1;
-	}
-
 	TAILQ_FOREACH(elt, &socket->dev_queue, next) {
 		tmp_pmd = elt->dev->data->dev_private;
-		if (tmp_pmd->id == pmd->id) {
-			MIF_LOG(ERR, "Memif device with id %d already "
-				"exists on socket %s",
-				pmd->id, socket->filename);
+		if (tmp_pmd->id == pmd->id && tmp_pmd->role == pmd->role) {
+		MIF_LOG(ERR, "Two interfaces with the same id (%d) cannot "
+			"have the same role.", pmd->id);
 			return -1;
 		}
 	}
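
The receive-path change above hinges on the recvmsg() return-value
convention: 0 signals an orderly peer shutdown and a negative value a
transport error, and in neither case can (or should) a disconnect
message still be sent. A standalone sketch of that convention, using
plain POSIX recv() rather than the driver's code:

  #include <stdio.h>
  #include <sys/types.h>
  #include <sys/socket.h>

  static int
  read_fixed_msg(int fd, void *buf, size_t msg_size)
  {
      ssize_t n = recv(fd, buf, msg_size, 0);

      if (n == 0) {
          printf("end-of-file\n");  /* peer closed: nothing to reply to */
          return -1;
      }
      if (n < 0) {
          perror("recv");           /* error: a reply cannot be delivered */
          return -1;
      }
      if ((size_t)n != msg_size) {
          /* a live peer sent a malformed message: worth telling it why */
          printf("invalid message size = %zd\n", n);
          return -1;
      }
      return 0;
  }
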
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 27c0f0924..81d71c53a 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -1491,6 +1491,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
 	pmd->cfg.num_m2s_rings = 0;
 
 	pmd->cfg.pkt_buffer_size = pkt_buffer_size;
+	rte_spinlock_init(&pmd->cc_lock);
 
 	data = eth_dev->data;
 	data->dev_private = pmd;
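
The new cc_lock serializes access to pmd->cc between the control
channel callbacks and memif_disconnect(). A minimal sketch of the DPDK
spinlock pattern being applied (the struct and helper names here are
illustrative, not the driver's real definitions):

  #include <stddef.h>
  #include <rte_spinlock.h>

  struct ctrl_state {
      void *cc;               /* shared with an interrupt callback */
      rte_spinlock_t cc_lock;
  };

  static void
  ctrl_state_init(struct ctrl_state *s)
  {
      s->cc = NULL;
      rte_spinlock_init(&s->cc_lock);  /* as memif_create() now does */
  }

  static void
  ctrl_state_teardown(struct ctrl_state *s)
  {
      rte_spinlock_lock(&s->cc_lock);  /* as memif_disconnect() now does */
      if (s->cc != NULL) {
          /* drain the message queue, unregister callbacks, free cc */
          s->cc = NULL;
      }
      rte_spinlock_unlock(&s->cc_lock);
  }
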
diff --git a/drivers/net/memif/rte_eth_memif.h b/drivers/net/memif/rte_eth_memif.h
index 0d2566392..6f45b7072 100644
--- a/drivers/net/memif/rte_eth_memif.h
+++ b/drivers/net/memif/rte_eth_memif.h
@@ -94,6 +94,7 @@ struct pmd_internals {
 	char secret[ETH_MEMIF_SECRET_SIZE]; /**< secret (optional security parameter) */
 
 	struct memif_control_channel *cc;	/**< control channel */
+	rte_spinlock_t cc_lock;			/**< control channel lock */
 
 	/* remote info */
 	char remote_name[RTE_DEV_NAME_MAX_LEN];		/**< remote app name */
-- 
2.17.1


Thread overview: 2+ messages
2020-03-09 14:22 Július Milan [this message]
2020-03-09 14:42 ` Ferruh Yigit
