DPDK patches and discussions
 help / color / mirror / Atom feed
From: Matej Vido <matejvido@gmail.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 2/5] szedata2: add handling of scattered packets in RX
Date: Fri, 18 Sep 2015 10:32:49 +0200	[thread overview]
Message-ID: <1442565172-5338-3-git-send-email-matejvido@gmail.com> (raw)
In-Reply-To: <1442565172-5338-1-git-send-email-matejvido@gmail.com>

Add new RX function for handling scattered packets.

Signed-off-by: Matej Vido <matejvido@gmail.com>
Reviewed-by: Jan Viktorin <viktorin@rehivetech.com>
---
 drivers/net/szedata2/rte_eth_szedata2.c | 356 +++++++++++++++++++++++++++++++-
 1 file changed, 354 insertions(+), 2 deletions(-)

diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 4db1287..ddb45e4 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -362,6 +362,343 @@ eth_szedata2_rx(void *queue,
 }
 
 static uint16_t
+eth_szedata2_rx_scattered(void *queue,
+		struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
+{
+	/*
+	 * Scattered RX burst handler: reads up to nb_pkts packets from the
+	 * szedata2 channel of the RX queue and copies each packet into
+	 * newly allocated mbufs, chaining additional mbufs when a packet
+	 * does not fit into a single buffer. Returns the number of mbufs
+	 * stored into bufs.
+	 *
+	 * NOTE(review): comment lines were added during review; no code
+	 * line of the original hunk was modified.
+	 */
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+	struct szedata2_rx_queue *sze_q = queue;
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	uint16_t num_rx = 0;
+	uint16_t buf_size;
+	uint16_t sg_size;
+	uint16_t hw_size;
+	uint16_t packet_size;
+	uint64_t num_bytes = 0;
+	struct szedata * sze = sze_q->sze;
+	uint8_t * header_ptr = NULL; /* header of packet */
+	uint8_t * packet_ptr1 = NULL;
+	uint8_t * packet_ptr2 = NULL;
+	uint16_t packet_len1 = 0;
+	uint16_t packet_len2 = 0;
+	uint16_t hw_data_align;
+
+	if (unlikely(sze_q->sze == NULL || nb_pkts == 0)) {
+		return 0;
+	}
+
+	/*
+	 * Reads the given number of packets from szedata2 channel given
+	 * by queue and copies the packet data into a newly allocated mbuf
+	 * to return.
+	 */
+	for (i = 0; i < nb_pkts; i++) {
+		const struct szedata_lock * ct_rx_lck_backup;
+		unsigned int ct_rx_rem_bytes_backup;
+		unsigned char * ct_rx_cur_ptr_backup;
+
+		/* get the next sze packet */
+		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
+				sze->ct_rx_lck->next == NULL) {
+			/* unlock old data */
+			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
+			sze->ct_rx_lck_orig = NULL;
+			sze->ct_rx_lck = NULL;
+		}
+
+		/*
+		 * Store items from sze structure which can be changed
+		 * before mbuf allocating. Use these items in case of mbuf
+		 * allocating failure.
+		 */
+		ct_rx_lck_backup = sze->ct_rx_lck;
+		ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
+		ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
+
+		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
+			/* nothing to read, lock new data */
+			sze->ct_rx_lck_orig = sze->ct_rx_lck =
+				szedata_rx_lock_data(sze_q->sze, ~0U);
+
+			/*
+			 * Backup items from sze structure must be updated
+			 * after locking to contain pointers to new locks.
+			 */
+			ct_rx_lck_backup = sze->ct_rx_lck;
+			ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
+			ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
+
+			if (sze->ct_rx_lck == NULL) {
+				/* nothing to lock */
+				break;
+			}
+
+			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
+
+			if (!sze->ct_rx_rem_bytes) {
+				break;
+			}
+		}
+
+		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
+			/*
+			 * cut in header - copy parts of header to merge buffer
+			 */
+			if (sze->ct_rx_lck->next == NULL) {
+				break;
+			}
+
+			/* copy first part of header */
+			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
+					sze->ct_rx_rem_bytes);
+
+			/* copy second part of header */
+			sze->ct_rx_lck = sze->ct_rx_lck->next;
+			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
+				sze->ct_rx_cur_ptr,
+				RTE_SZE2_PACKET_HEADER_SIZE -
+				sze->ct_rx_rem_bytes);
+
+			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
+				sze->ct_rx_rem_bytes;
+			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+				RTE_SZE2_PACKET_HEADER_SIZE +
+				sze->ct_rx_rem_bytes;
+
+			header_ptr = (uint8_t *) sze->ct_rx_buffer;
+		} else {
+			/* not cut */
+			header_ptr = (uint8_t *) sze->ct_rx_cur_ptr;
+			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
+			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
+		}
+
+		/*
+		 * Parse the packet header: the first 16-bit word holds the
+		 * whole segment size, the second the hw data size (both
+		 * little endian).
+		 */
+		sg_size = le16toh(*((uint16_t *)header_ptr));
+		hw_size = le16toh(*(((uint16_t *)header_ptr)+1));
+		packet_size = sg_size -
+			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
+
+
+		/* sanity check: a segment must never be empty */
+		/*
+		 * NOTE(review): errx() aborts the whole process; for a PMD
+		 * data path consider logging and dropping the packet instead
+		 * - confirm process exit is the intended reaction here and
+		 * at the errx() calls below.
+		 */
+		if (!sg_size) {
+			errx(5, "Zero segsize");
+		}
+
+		/* check sg_size and hwsize */
+		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
+			errx(10, "Hwsize bigger than expected. Segsize: %d, "
+					"hwsize: %d", sg_size, hw_size);
+		}
+
+		/*
+		 * Offset from the current read pointer to the packet payload:
+		 * header plus hw data are padded to an 8-byte boundary and
+		 * the header itself has already been consumed above.
+		 */
+		hw_data_align =
+			RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
+			hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;
+
+		if (sze->ct_rx_rem_bytes >=
+				(uint16_t)(sg_size -
+				RTE_SZE2_PACKET_HEADER_SIZE)) {
+			/* no cut */
+			/* one packet ready - go to another */
+			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
+			packet_len1 = packet_size;
+			packet_ptr2 = NULL;
+			packet_len2 = 0;
+
+			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
+				RTE_SZE2_PACKET_HEADER_SIZE;
+			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
+				RTE_SZE2_PACKET_HEADER_SIZE;
+		} else {
+			/* cut in data */
+			if (sze->ct_rx_lck->next == NULL) {
+				errx(6, "Need \"next\" lock, but it is "
+					"missing: %u", sze->ct_rx_rem_bytes);
+			}
+
+			/* skip hw data */
+			if (sze->ct_rx_rem_bytes <= hw_data_align) {
+				uint16_t rem_size = hw_data_align -
+					sze->ct_rx_rem_bytes;
+
+				/* MOVE to next lock */
+				sze->ct_rx_lck = sze->ct_rx_lck->next;
+				sze->ct_rx_cur_ptr =
+					(void *) (((uint8_t *)
+					(sze->ct_rx_lck->start)) + rem_size);
+
+				packet_ptr1 = sze->ct_rx_cur_ptr;
+				packet_len1 = packet_size;
+				packet_ptr2 = NULL;
+				packet_len2 = 0;
+
+				sze->ct_rx_cur_ptr +=
+					RTE_SZE2_ALIGN8(packet_size);
+				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+					rem_size - RTE_SZE2_ALIGN8(packet_size);
+			} else {
+				/* get pointer and length from first part */
+				packet_ptr1 = sze->ct_rx_cur_ptr +
+					hw_data_align;
+				packet_len1 = sze->ct_rx_rem_bytes -
+					hw_data_align;
+
+				/* MOVE to next lock */
+				sze->ct_rx_lck = sze->ct_rx_lck->next;
+				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
+
+				/* get pointer and length from second part */
+				packet_ptr2 = sze->ct_rx_cur_ptr;
+				packet_len2 = packet_size - packet_len1;
+
+				sze->ct_rx_cur_ptr +=
+					RTE_SZE2_ALIGN8(packet_size) -
+					packet_len1;
+				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
+					(RTE_SZE2_ALIGN8(packet_size) -
+					 packet_len1);
+			}
+		}
+
+		if (unlikely(packet_ptr1 == NULL)) {
+			break;
+		}
+
+		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
+
+		if (unlikely(mbuf == NULL)) {
+			/*
+			 * Restore items from sze structure to state after
+			 * unlocking (eventually locking).
+			 */
+			sze->ct_rx_lck = ct_rx_lck_backup;
+			sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
+			sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
+			break;
+		}
+
+		/* get the space available for data in the mbuf */
+		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
+		buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+				RTE_PKTMBUF_HEADROOM);
+
+		if (packet_size <= buf_size) {
+			/* sze packet will fit in one mbuf, go ahead and copy */
+			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+					packet_ptr1, packet_len1);
+			if (packet_ptr2 != NULL) {
+				rte_memcpy((void *)
+					(rte_pktmbuf_mtod(mbuf, uint8_t *) +
+					packet_len1), packet_ptr2, packet_len2);
+			}
+			mbuf->data_len = (uint16_t)packet_size;
+		} else {
+			/*
+			 * sze packet will not fit in one mbuf,
+			 * scatter packet into more mbufs
+			 */
+			struct rte_mbuf * m = mbuf;
+			uint16_t len = rte_pktmbuf_tailroom(mbuf);
+
+			/* copy first part of packet */
+			/* fill first mbuf */
+			/*
+			 * NOTE(review): this first copy uses the full tailroom
+			 * as its length; if packet_len1 < tailroom this would
+			 * over-read packet_ptr1 and underflow the uint16_t
+			 * packet_len1 - verify the channel format rules this
+			 * out, or clamp with RTE_MIN() as the loop below does.
+			 */
+			rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
+				len);
+			packet_len1 -= len;
+			packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
+
+			while (packet_len1 > 0) {
+				/* fill new mbufs */
+				/*
+				 * The new segment is linked into the chain
+				 * before rte_pktmbuf_append(mbuf, ...) is
+				 * called, so the append writes into this new
+				 * last segment.
+				 */
+				m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
+
+				if (unlikely(m->next == NULL)) {
+					rte_pktmbuf_free(mbuf);
+					/*
+					 * Restore items from sze structure
+					 * to state after unlocking (eventually
+					 * locking).
+					 */
+					sze->ct_rx_lck = ct_rx_lck_backup;
+					sze->ct_rx_rem_bytes =
+						ct_rx_rem_bytes_backup;
+					sze->ct_rx_cur_ptr =
+						ct_rx_cur_ptr_backup;
+					goto finish;
+				}
+
+				m = m->next;
+
+				len = RTE_MIN(rte_pktmbuf_tailroom(m),
+					packet_len1);
+				rte_memcpy(rte_pktmbuf_append(mbuf, len),
+					packet_ptr1, len);
+
+				(mbuf->nb_segs)++;
+				packet_len1 -= len;
+				packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
+			}
+
+			if (packet_ptr2 != NULL) {
+				/* copy second part of packet, if exists */
+				/* fill the rest of currently last mbuf */
+				/*
+				 * NOTE(review): same unclamped-tailroom
+				 * concern as the first-part copy above:
+				 * packet_len2 may be smaller than the
+				 * remaining tailroom of m.
+				 */
+				len = rte_pktmbuf_tailroom(m);
+				rte_memcpy(rte_pktmbuf_append(mbuf, len),
+					packet_ptr2, len);
+				packet_len2 -= len;
+				packet_ptr2 = ((uint8_t *)packet_ptr2) + len;
+
+				while (packet_len2 > 0) {
+					/* fill new mbufs */
+					m->next = rte_pktmbuf_alloc(
+							sze_q->mb_pool);
+
+					if (unlikely(m->next == NULL)) {
+						rte_pktmbuf_free(mbuf);
+						/*
+						 * Restore items from sze
+						 * structure to state after
+						 * unlocking (eventually
+						 * locking).
+						 */
+						sze->ct_rx_lck =
+							ct_rx_lck_backup;
+						sze->ct_rx_rem_bytes =
+							ct_rx_rem_bytes_backup;
+						sze->ct_rx_cur_ptr =
+							ct_rx_cur_ptr_backup;
+						goto finish;
+					}
+
+					m = m->next;
+
+					len = RTE_MIN(rte_pktmbuf_tailroom(m),
+						packet_len2);
+					rte_memcpy(rte_pktmbuf_append(mbuf,len),
+							packet_ptr2, len);
+
+					(mbuf->nb_segs)++;
+					packet_len2 -= len;
+					packet_ptr2 = ((uint8_t *)packet_ptr2) +
+						len;
+				}
+			}
+		}
+		/* packet complete: fill in metadata and hand it to the caller */
+		mbuf->pkt_len = packet_size;
+		mbuf->port = sze_q->in_port;
+		bufs[num_rx] = mbuf;
+		num_rx++;
+		num_bytes += packet_size;
+	}
+
+finish:
+	sze_q->rx_pkts += num_rx;
+	sze_q->rx_bytes += num_bytes;
+	return num_rx;
+}
+
+static uint16_t
 eth_szedata2_tx(void *queue,
 		struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
@@ -787,8 +1124,16 @@ eth_dev_stop(struct rte_eth_dev *dev)
 }
 
 static int
-eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+eth_dev_configure(struct rte_eth_dev *dev)
 {
+	/*
+	 * Select the RX burst handler at configure time: use the scattered
+	 * path when enable_scatter is requested, otherwise the plain
+	 * single-mbuf receive function.
+	 */
+	struct rte_eth_dev_data * data = dev->data;
+	if (data->dev_conf.rxmode.enable_scatter == 1) {
+		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
+		data->scattered_rx = 1;
+	} else {
+		dev->rx_pkt_burst = eth_szedata2_rx;
+		data->scattered_rx = 0;
+	}
 	return 0;
 }
 
@@ -1108,7 +1453,14 @@ rte_eth_from_szedata2(const char *name,
 		return -1;
 	}
 
-	eth_dev->rx_pkt_burst = eth_szedata2_rx;
+	if (data->dev_conf.rxmode.enable_scatter == 1 ||
+		data->scattered_rx == 1) {
+		eth_dev->rx_pkt_burst = eth_szedata2_rx_scattered;
+		data->scattered_rx = 1;
+	} else {
+		eth_dev->rx_pkt_burst = eth_szedata2_rx;
+		data->scattered_rx = 0;
+	}
 	eth_dev->tx_pkt_burst = eth_szedata2_tx;
 
 	return 0;
-- 
1.9.1

  parent reply	other threads:[~2015-09-18  8:34 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-06-19  8:24 [dpdk-dev] [PATCH 0/2] Virtual PMD using sze2 layer for COMBO cards Matej Vido
2015-06-19  8:25 ` [dpdk-dev] [PATCH 1/2] szedata2: new poll mode driver Matej Vido
2016-03-21 17:45   ` Stephen Hemminger
2016-03-24 17:52     ` Matej Vido
2015-06-19  8:25 ` [dpdk-dev] [PATCH 2/2] doc: added documentation for szedata2 PMD Matej Vido
2015-09-18  8:32 ` [dpdk-dev] [PATCH v2 0/5] Virtual PMD using sze2 layer for COMBO cards Matej Vido
2015-09-18  8:32   ` [dpdk-dev] [PATCH v2 1/5] szedata2: add new poll mode driver Matej Vido
2015-10-26 15:10     ` Thomas Monjalon
2015-09-18  8:32   ` Matej Vido [this message]
2015-09-18  8:32   ` [dpdk-dev] [PATCH v2 3/5] szedata2: add handling of scattered packets in TX Matej Vido
2015-10-26 14:55     ` Thomas Monjalon
2015-10-27 17:40       ` Matej Vido
2015-09-18  8:32   ` [dpdk-dev] [PATCH v2 4/5] doc: add documentation for szedata2 PMD Matej Vido
2015-10-26 15:00     ` Thomas Monjalon
2015-10-26 15:09     ` Thomas Monjalon
2015-10-27 17:33       ` Matej Vido
2015-10-27 18:00         ` Thomas Monjalon
2015-11-02 14:26           ` Matej Vido
2015-10-30 12:16     ` Mcnamara, John
2015-11-06 14:34       ` Matej Vido
2015-09-18  8:32   ` [dpdk-dev] [PATCH v2 5/5] doc: update 2.2 release notes Matej Vido
2015-09-24 16:23     ` [dpdk-dev] [PATCH v2] doc: update the dpdk " John McNamara
2015-09-24 21:14       ` Thomas Monjalon
2015-11-10 14:18   ` [dpdk-dev] [PATCH v3 0/6] Virtual PMD using sze2 layer for COMBO cards Matej Vido
2015-11-10 14:18     ` [dpdk-dev] [PATCH v3 1/6] szedata2: add new poll mode driver Matej Vido
2015-11-20 15:04       ` Thomas Monjalon
2015-11-20 19:25         ` Matej Vido
2015-11-21 10:27           ` Thomas Monjalon
2015-11-10 14:18     ` [dpdk-dev] [PATCH v3 2/6] szedata2: add non-scattered RX function Matej Vido
2015-11-10 14:18     ` [dpdk-dev] [PATCH v3 3/6] szedata2: add TX function Matej Vido
2015-11-10 14:18     ` [dpdk-dev] [PATCH v3 4/6] szedata2: add support for scattered packets in RX Matej Vido
2015-11-10 14:18     ` [dpdk-dev] [PATCH v3 5/6] doc: add documentation for szedata2 PMD Matej Vido
2015-11-10 14:55       ` Mcnamara, John
2015-11-10 14:18     ` [dpdk-dev] [PATCH v3 6/6] doc: update 2.2 release notes Matej Vido
2015-11-16 14:22       ` Mcnamara, John
2015-11-20 16:19     ` [dpdk-dev] [PATCH v3 0/6] Virtual PMD using sze2 layer for COMBO cards Thomas Monjalon

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1442565172-5338-3-git-send-email-matejvido@gmail.com \
    --to=matejvido@gmail.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).