From: Yuying Zhang <yuying.zhang@intel.com>
To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com,
	jingjing.wu@intel.com
Cc: Yuying Zhang <yuying.zhang@intel.com>
Subject: [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle
Date: Sat, 12 Aug 2023 07:55:04 +0000
Message-ID: <20230812075506.361769-4-yuying.zhang@intel.com>
In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com>

Add control queue message handling to the cpfl driver, including the
send, receive, clean and post RX buffers operations.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 419 ++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_controlq.h |  24 ++
 2 files changed, 442 insertions(+), 1 deletion(-)
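
Usage note (illustrative only, not part of the diff below): the four new
entry points are meant to be used together as a request/response
transaction. A rough caller-side sketch in C follows; the opcode value,
the TX/RX queue pairing and the pre-allocated DMA buffer are placeholder
assumptions, the response is assumed to be indirect (buffer-carrying),
and error handling is trimmed.

	#include "cpfl_controlq.h"

	static int
	cpfl_ctlq_example_xact(struct idpf_hw *hw, struct idpf_ctlq_info *tx_cq,
			       struct idpf_ctlq_info *rx_cq,
			       struct idpf_dma_mem *dma_buf)
	{
		struct idpf_ctlq_msg msg = { 0 };
		struct idpf_ctlq_msg *done[1];
		uint16_t num = 1;
		int ret;

		/* Build one indirect message; the caller owns dma_buf until
		 * the completion for it has been cleaned.
		 */
		msg.opcode = 0x1234;		/* placeholder opcode */
		msg.data_len = dma_buf->size;
		msg.ctx.indirect.payload = dma_buf;

		ret = cpfl_vport_ctlq_send(hw, tx_cq, 1, &msg);
		if (ret)
			return ret;

		/* Reclaim the send descriptor once hardware marks it done */
		ret = cpfl_vport_ctlq_clean_sq(tx_cq, &num, done);
		if (ret)
			return ret;

		/* Poll for the response; on success the RX buffer moves to
		 * msg.ctx.indirect.payload and must be handed back to the
		 * ring via the post RX buffers call.
		 */
		num = 1;
		ret = cpfl_vport_ctlq_recv(rx_cq, &num, &msg);
		if (!ret && num == 1) {
			struct idpf_dma_mem *rx_buf = msg.ctx.indirect.payload;
			uint16_t nb_buf = 1;

			/* ... consume the response payload here ... */
			ret = cpfl_vport_ctlq_post_rx_buffs(hw, rx_cq,
							    &nb_buf, &rx_buf);
		}

		return ret;
	}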

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
index 97a6bdd042..c696a529a7 100644
--- a/drivers/net/cpfl/cpfl_controlq.c
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2023 Intel Corporation
  */
 
 #include "cpfl_controlq.h"
@@ -332,6 +332,395 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 	return status;
 }
 
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(uint64_t *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+					   IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring
+ * after consuming them; buff_count is the number of buffers being returned.
+ *
+ * Note: this function needs to be called after a receive call even if
+ * there are no DMA buffers to be returned, i.e. with buff_count = 0 and
+ * buffs = NULL, to support direct commands.
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+	if (tbp == cq->next_to_clean)
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue messages
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by the interrupt handler or a polling mechanism. The caller is
+ * expected to free the message buffers.
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
 int
 cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 		    struct idpf_ctlq_info **cq)
@@ -378,3 +767,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
 	cpfl_ctlq_remove(hw, cq);
 }
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
index 930d717f63..3fd658cc36 100644
--- a/drivers/net/cpfl/cpfl_controlq.h
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -14,6 +14,13 @@
 #define CPFL_DFLT_MBX_RING_LEN		512
 #define CPFL_CFGQ_RING_LEN		512
 
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
 /* Generic queue info structures */
 /* MB, CONFIG and EVENT q do not have extended info */
 struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
 int cpfl_ctlq_add(struct idpf_hw *hw,
 		  struct cpfl_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    uint16_t *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
 int cpfl_vport_ctlq_add(struct idpf_hw *hw,
 			struct cpfl_ctlq_create_info *qinfo,
 			struct idpf_ctlq_info **cq);
 void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  uint16_t *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
 #endif
-- 
2.25.1

