DPDK patches and discussions
 help / color / mirror / Atom feed
From: "Burakov, Anatoly" <anatoly.burakov@intel.com>
To: Jakub Grajciar <jgrajcia@cisco.com>, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v3] net/memif: multi-process support
Date: Tue, 25 Jun 2019 11:26:23 +0100	[thread overview]
Message-ID: <88888329-70b7-0f8d-4943-253254041edf@intel.com> (raw)
In-Reply-To: <20190625100502.20624-1-jgrajcia@cisco.com>

On 25-Jun-19 11:05 AM, Jakub Grajciar wrote:
> Multi-process support for memif PMD.
> Primary process handles connection establishment.
> Secondary process queries for memory regions.
> 
> Signed-off-by: Jakub Grajciar <jgrajcia@cisco.com>
> ---

<snip>

> +/* Message header to synchronize regions */
> +struct mp_region_msg {
> +	char port_name[RTE_DEV_NAME_MAX_LEN];
> +	memif_region_index_t idx;
> +	memif_region_size_t size;
> +};
> +
> +static int
> +memif_mp_send_region(const struct rte_mp_msg *msg, const void *peer)
> +{
> +	struct rte_eth_dev *dev;
> +	struct pmd_process_private *proc_private;
> +	const struct mp_region_msg *msg_param = (const struct mp_region_msg *)msg->param;
> +	struct rte_mp_msg reply;
> +	struct mp_region_msg *reply_param = (struct mp_region_msg *)reply.param;
> +	uint16_t port_id;
> +	int ret;
> +
> +	/* Get requested port */
> +	ret = rte_eth_dev_get_port_by_name(msg_param->port_name, &port_id);
> +	if (ret) {
> +		MIF_LOG(ERR, "Failed to get port id for %s",
> +			msg_param->port_name);
> +		return -1;
> +	}
> +	dev = &rte_eth_devices[port_id];
> +	proc_private = dev->process_private;
> +
> +	memset(&reply, 0, sizeof(reply));
> +	strlcpy(reply.name, msg->name, sizeof(reply.name));
> +	reply_param->idx = msg_param->idx;
> +	if (proc_private->regions[msg_param->idx] != NULL) {
> +		reply_param->size = proc_private->regions[msg_param->idx]->region_size;
> +		reply.fds[0] = proc_private->regions[msg_param->idx]->fd;
> +		reply.num_fds = 1;
> +	}
> +	reply.len_param = sizeof(*reply_param);
> +	if (rte_mp_reply(&reply, peer) < 0) {
> +		MIF_LOG(ERR, "Failed to reply to an add region request");
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +/*
> + * Request regions
> + * Called by secondary process, when ports link status goes up.
> + */
> +static int
> +memif_mp_request_regions(struct rte_eth_dev *dev)
> +{
> +	int ret, i;
> +	struct timespec timeout = {.tv_sec = 5, .tv_nsec = 0};
> +	struct rte_mp_msg msg, *reply;
> +	struct rte_mp_reply replies;
> +	struct mp_region_msg *msg_param = (struct mp_region_msg *)msg.param;
> +	struct mp_region_msg *reply_param;
> +	struct memif_region *r;
> +	struct pmd_process_private *proc_private = dev->process_private;
> +
> +	MIF_LOG(DEBUG, "Requesting memory regions");
> +
> +	for (i = 0; i < ETH_MEMIF_MAX_REGION_NUM; i++) {
> +		/* Prepare the message */
> +		memset(&msg, 0, sizeof(msg));
> +		strlcpy(msg.name, MEMIF_MP_SEND_REGION, sizeof(msg.name));
> +		strlcpy(msg_param->port_name, dev->data->name,
> +			sizeof(msg_param->port_name));
> +		msg_param->idx = i;
> +		msg.len_param = sizeof(*msg_param);
> +
> +		/* Send message */
> +		ret = rte_mp_request_sync(&msg, &replies, &timeout);
> +		if (ret < 0 || replies.nb_received != 1) {
> +			MIF_LOG(ERR, "Failed to send mp msg: %d",
> +				rte_errno);
> +			return -1;
> +		}
> +
> +		reply = &replies.msgs[0];
> +		reply_param = (struct mp_region_msg *)reply->param;
> +
> +		if (reply_param->size > 0) {
> +			r = rte_zmalloc("region", sizeof(struct memif_region), 0);
> +			if (r == NULL) {
> +				MIF_LOG(ERR, "Failed to alloc memif region.");
> +				free(reply);
> +				return -ENOMEM;
> +			}
> +			r->region_size = reply_param->size;
> +			if (reply->num_fds < 1) {
> +				MIF_LOG(ERR, "Missing file descriptor.");
> +				free(reply);
> +				return -1;
> +			}
> +			r->fd = reply->fds[0];
> +			r->addr = NULL;
> +
> +			proc_private->regions[reply_param->idx] = r;
> +			proc_private->regions_num++;
> +		}
> +		free(reply);
> +	}
> +
> +	return memif_connect(dev);
> +}
> +

On the multiprocess/IPC part,

Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>

Please bear in mind that I did not look at other sections of the code.

-- 
Thanks,
Anatoly

  reply	other threads:[~2019-06-25 10:26 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-13  6:42 [dpdk-dev] [PATCH v1] " Jakub Grajciar
2019-06-13  9:40 ` Burakov, Anatoly
2019-06-18  8:48 ` [dpdk-dev] [PATCH v2] " Jakub Grajciar
2019-06-18  9:20   ` Burakov, Anatoly
2019-06-25 10:05   ` [dpdk-dev] [PATCH v3] " Jakub Grajciar
2019-06-25 10:26     ` Burakov, Anatoly [this message]
2019-06-28 18:30       ` Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=88888329-70b7-0f8d-4943-253254041edf@intel.com \
    --to=anatoly.burakov@intel.com \
    --cc=dev@dpdk.org \
    --cc=jgrajcia@cisco.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).