From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Cc: Stephen Hemminger <stephen@networkplumber.org>,
	Junlong Wang <wang.junlong1@zte.com.cn>,
	Lijie Shan <shan.lijie@zte.com.cn>
Subject: [PATCH] net/zxdh: avoid use of rte_memcpy
Date: Fri, 26 Sep 2025 11:10:17 -0700
Message-ID: <20250926181017.470585-1-stephen@networkplumber.org>

The regular library memcpy() gets more compiler error checking and
is as fast as or faster than rte_memcpy() for small, fixed sizes.
Where a whole structure is copied, use struct assignment for even
more type checking.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/zxdh/zxdh_flow.c | 53 ++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 30 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_flow.c b/drivers/net/zxdh/zxdh_flow.c
index 7a2e20ac39..1167f53641 100644
--- a/drivers/net/zxdh/zxdh_flow.c
+++ b/drivers/net/zxdh/zxdh_flow.c
@@ -94,9 +94,9 @@ static void
 zxdh_adjust_flow_op_rsp_memory_layout(void *old_data,
 		size_t old_size, void *new_data)
 {
-	rte_memcpy(new_data, old_data, sizeof(struct zxdh_flow));
+	memcpy(new_data, old_data, sizeof(struct zxdh_flow));
 	memset((char *)new_data + sizeof(struct zxdh_flow), 0, 4);
-	rte_memcpy((char *)new_data + sizeof(struct zxdh_flow) + 4,
+	memcpy((char *)new_data + sizeof(struct zxdh_flow) + 4,
 		(char *)old_data + sizeof(struct zxdh_flow),
 		old_size - sizeof(struct zxdh_flow));
 }
@@ -1328,14 +1328,11 @@ static int fd_flow_parse_pattern(struct rte_eth_dev *dev, const struct rte_flow_
 						ipv4_mask->hdr.type_of_service;
 				key->frag_flag = (ipv4_spec->hdr.fragment_offset != 0) ? 1 : 0;
 				key_mask->frag_flag = (ipv4_mask->hdr.fragment_offset != 0) ? 1 : 0;
-				rte_memcpy((uint32_t *)key->src_ip + 3,
-							 &ipv4_spec->hdr.src_addr, 4);
-				rte_memcpy((uint32_t *)key->dst_ip + 3,
-							 &ipv4_spec->hdr.dst_addr, 4);
-				rte_memcpy((uint32_t *)key_mask->src_ip + 3,
-							 &ipv4_mask->hdr.src_addr, 4);
-				rte_memcpy((uint32_t *)key_mask->dst_ip + 3,
-							 &ipv4_mask->hdr.dst_addr, 4);
+
+				memcpy((uint32_t *)key->src_ip + 3, &ipv4_spec->hdr.src_addr, 4);
+				memcpy((uint32_t *)key->dst_ip + 3, &ipv4_spec->hdr.dst_addr, 4);
+				memcpy((uint32_t *)key_mask->src_ip + 3, &ipv4_mask->hdr.src_addr, 4);
+				memcpy((uint32_t *)key_mask->dst_ip + 3, &ipv4_mask->hdr.dst_addr, 4);
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -1364,14 +1361,10 @@ static int fd_flow_parse_pattern(struct rte_eth_dev *dev, const struct rte_flow_
 				key->nw_proto = ipv6_spec->hdr.proto;
 				key_mask->nw_proto = ipv6_mask->hdr.proto;
 
-				rte_memcpy(key->src_ip,
-							 &ipv6_spec->hdr.src_addr, 16);
-				rte_memcpy(key->dst_ip,
-							 &ipv6_spec->hdr.dst_addr, 16);
-				rte_memcpy(key_mask->src_ip,
-							 &ipv6_mask->hdr.src_addr, 16);
-				rte_memcpy(key_mask->dst_ip,
-							 &ipv6_mask->hdr.dst_addr, 16);
+				memcpy(key->src_ip, &ipv6_spec->hdr.src_addr, 16);
+				memcpy(key->dst_ip, &ipv6_spec->hdr.dst_addr, 16);
+				memcpy(key_mask->src_ip, &ipv6_mask->hdr.src_addr, 16);
+				memcpy(key_mask->dst_ip, &ipv6_mask->hdr.dst_addr, 16);
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
@@ -1477,8 +1470,8 @@ static int fd_flow_parse_pattern(struct rte_eth_dev *dev, const struct rte_flow_
 								 "Invalid vxlan mask");
 					return -rte_errno;
 			}
-			rte_memcpy(key->vni, vxlan_spec->vni, 3);
-			rte_memcpy(key_mask->vni, vxlan_mask->vni, 3);
+			memcpy(key->vni, vxlan_spec->vni, 3);
+			memcpy(key_mask->vni, vxlan_mask->vni, 3);
 			break;
 		}
 		case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1573,10 +1566,10 @@ fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,
 		switch (items->type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			item_eth = items->spec;
-			rte_memcpy(&dh_flow->encap0.dst_mac1, item_eth->dst.addr_bytes, 2);
-			rte_memcpy(&dh_flow->encap1.src_mac1, item_eth->src.addr_bytes, 2);
-			rte_memcpy(&dh_flow->encap0.dst_mac2, &item_eth->dst.addr_bytes[2], 4);
-			rte_memcpy(&dh_flow->encap1.src_mac2, &item_eth->src.addr_bytes[2], 4);
+			memcpy(&dh_flow->encap0.dst_mac1, item_eth->dst.addr_bytes, 2);
+			memcpy(&dh_flow->encap1.src_mac1, item_eth->src.addr_bytes, 2);
+			memcpy(&dh_flow->encap0.dst_mac2, &item_eth->dst.addr_bytes[2], 4);
+			memcpy(&dh_flow->encap1.src_mac2, &item_eth->src.addr_bytes[2], 4);
 			dh_flow->encap0.dst_mac1 = rte_bswap16(dh_flow->encap0.dst_mac1);
 			dh_flow->encap1.src_mac1 = rte_bswap16(dh_flow->encap1.src_mac1);
 			dh_flow->encap0.dst_mac2 = rte_bswap32(dh_flow->encap0.dst_mac2);
@@ -1592,9 +1585,9 @@ fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,
 			dh_flow->encap0.tos = item_ipv4->hdr.type_of_service;
 			dh_flow->encap0.ttl = item_ipv4->hdr.time_to_live;
 			addr = rte_bswap32(item_ipv4->hdr.src_addr);
-			rte_memcpy((uint32_t *)dh_flow->encap1.sip.ip_addr + 3, &addr, 4);
+			memcpy((uint32_t *)dh_flow->encap1.sip.ip_addr + 3, &addr, 4);
 			addr = rte_bswap32(item_ipv4->hdr.dst_addr);
-			rte_memcpy((uint32_t *)dh_flow->encap0.dip.ip_addr + 3, &addr, 4);
+			memcpy((uint32_t *)dh_flow->encap0.dip.ip_addr + 3, &addr, 4);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			item_ipv6 = items->spec;
@@ -1603,7 +1596,7 @@ fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,
 					(item_ipv6->hdr.vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
 						RTE_IPV6_HDR_TC_SHIFT;
 			dh_flow->encap0.ttl = item_ipv6->hdr.hop_limits;
-			rte_memcpy(dh_flow->encap1.sip.ip_addr, &item_ipv6->hdr.src_addr, 16);
+			memcpy(dh_flow->encap1.sip.ip_addr, &item_ipv6->hdr.src_addr, 16);
 			dh_flow->encap1.sip.ip_addr[0] =
 				rte_bswap32(dh_flow->encap1.sip.ip_addr[0]);
 			dh_flow->encap1.sip.ip_addr[1] =
@@ -1612,7 +1605,7 @@ fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,
 				rte_bswap32(dh_flow->encap1.sip.ip_addr[2]);
 			dh_flow->encap1.sip.ip_addr[3] =
 				rte_bswap32(dh_flow->encap1.sip.ip_addr[3]);
-			rte_memcpy(dh_flow->encap0.dip.ip_addr, &item_ipv6->hdr.dst_addr, 16);
+			memcpy(dh_flow->encap0.dip.ip_addr, &item_ipv6->hdr.dst_addr, 16);
 			dh_flow->encap0.dip.ip_addr[0] =
 					rte_bswap32(dh_flow->encap0.dip.ip_addr[0]);
 			dh_flow->encap0.dip.ip_addr[1] =
@@ -1884,7 +1877,7 @@ vf_flow_msg_process(enum zxdh_msg_type msg_type, struct rte_eth_dev *dev,
 	struct zxdh_flow_op_rsp *flow_rsp = (struct zxdh_flow_op_rsp *)flow_op_rsp;
 
 	dh_flow->hash_search_index = hw->hash_search_index;
-	rte_memcpy(&flow_msg->dh_flow, dh_flow, sizeof(struct zxdh_flow));
+	flow_msg->dh_flow = *dh_flow;
 
 	zxdh_msg_head_build(hw, msg_type, &msg_info);
 	ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
@@ -1909,7 +1902,7 @@ vf_flow_msg_process(enum zxdh_msg_type msg_type, struct rte_eth_dev *dev,
 	if (msg_type == ZXDH_FLOW_HW_ADD)
 		dh_flow->flowentry.hw_idx = flow_rsp->dh_flow.flowentry.hw_idx;
 	if (count)
-		rte_memcpy((void *)count, &flow_rsp->count, sizeof(flow_rsp->count));
+		memcpy((void *)count, &flow_rsp->count, sizeof(flow_rsp->count));
 
 	return ret;
 }
-- 
2.47.3

