DPDK patches and discussions
* [PATCH] avoid AltiVec keyword vector
@ 2022-05-03 12:03 Thomas Monjalon
  2022-05-03 12:30 ` Bruce Richardson
  2022-05-04 20:40 ` David Christensen
  0 siblings, 2 replies; 6+ messages in thread
From: Thomas Monjalon @ 2022-05-03 12:03 UTC
  To: dev
  Cc: David Christensen, Beilei Xing, Matan Azrad,
	Viacheslav Ovsiienko, Maxime Coquelin, Chenbo Xia,
	Bruce Richardson, Vladimir Medvedkin

The AltiVec keyword "vector" may easily conflict with other uses of the
name: altivec.h exposes it as a macro, so any unrelated identifier called
"vector" breaks once the header is included.
As a rule, it is better to use the alternative keyword "__vector".

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
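(Not part of the patch: a minimal illustration of the clash being avoided,
assuming GCC's altivec.h, which provides "vector" as a macro expanding to
"__vector". Any unrelated identifier named "vector" stops compiling once
that header is in scope, while the explicit "__vector" spelling needs no
macro at all. Hypothetical snippet, built with -maltivec on powerpc:)

    #include <altivec.h>

    /* Illustrative only, not DPDK code.
     * With the "vector" macro from altivec.h still defined, a declaration
     * such as
     *     struct stats { unsigned int vector; };
     * no longer compiles, because the field name expands to the keyword.
     * Spelling the keyword as __vector avoids the clash entirely.
     */
    static __vector unsigned int
    add4(__vector unsigned int a, __vector unsigned int b)
    {
            return vec_add(a, b);
    }
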
 drivers/net/i40e/i40e_rxtx_vec_altivec.c      |  214 ++--
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h      | 1078 ++++++++---------
 .../net/virtio/virtio_rxtx_simple_altivec.c   |   70 +-
 examples/l3fwd/l3fwd_lpm_altivec.h            |   14 +-
 lib/eal/ppc/include/rte_vect.h                |    2 +-
 lib/lpm/rte_lpm_altivec.h                     |    6 +-
 6 files changed, 692 insertions(+), 692 deletions(-)

diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 825834c5cd..2dfa04599c 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -27,10 +27,10 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
 	struct rte_mbuf *mb0, *mb1;
 
-	vector unsigned long hdr_room = (vector unsigned long){
+	__vector unsigned long hdr_room = (__vector unsigned long){
 						RTE_PKTMBUF_HEADROOM,
 						RTE_PKTMBUF_HEADROOM};
-	vector unsigned long dma_addr0, dma_addr1;
+	__vector unsigned long dma_addr0, dma_addr1;
 
 	rxdp = rxq->rx_ring + rxq->rxrearm_start;
 
@@ -40,11 +40,11 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
 		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
 		    rxq->nb_rx_desc) {
-			dma_addr0 = (vector unsigned long){};
+			dma_addr0 = (__vector unsigned long){};
 			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
 				rxep[i].mbuf = &rxq->fake_mbuf;
 				vec_st(dma_addr0, 0,
-				       (vector unsigned long *)&rxdp[i].read);
+				       (__vector unsigned long *)&rxdp[i].read);
 			}
 		}
 		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
@@ -54,7 +54,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 
 	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
 	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
-		vector unsigned long vaddr0, vaddr1;
+		__vector unsigned long vaddr0, vaddr1;
 		uintptr_t p0, p1;
 
 		mb0 = rxep[0].mbuf;
@@ -72,8 +72,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 		*(uint64_t *)p1 = rxq->mbuf_initializer;
 
 		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
-		vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
-		vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+		vaddr0 = vec_ld(0, (__vector unsigned long *)&mb0->buf_addr);
+		vaddr1 = vec_ld(0, (__vector unsigned long *)&mb1->buf_addr);
 
 		/* convert pa to dma_addr hdr/data */
 		dma_addr0 = vec_mergel(vaddr0, vaddr0);
@@ -84,8 +84,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 		dma_addr1 = vec_add(dma_addr1, hdr_room);
 
 		/* flush desc with pa dma_addr */
-		vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
-		vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+		vec_st(dma_addr0, 0, (__vector unsigned long *)&rxdp++->read);
+		vec_st(dma_addr1, 0, (__vector unsigned long *)&rxdp++->read);
 	}
 
 	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
@@ -102,32 +102,32 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 }
 
 static inline void
-desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(__vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
 {
-	vector unsigned int vlan0, vlan1, rss, l3_l4e;
+	__vector unsigned int vlan0, vlan1, rss, l3_l4e;
 
 	/* mask everything except RSS, flow director and VLAN flags
 	 * bit2 is for VLAN tag, bit11 for flow director indication
 	 * bit13:12 for RSS indication.
 	 */
-	const vector unsigned int rss_vlan_msk = (vector unsigned int){
+	const __vector unsigned int rss_vlan_msk = (__vector unsigned int){
 			(int32_t)0x1c03804, (int32_t)0x1c03804,
 			(int32_t)0x1c03804, (int32_t)0x1c03804};
 
 	/* map rss and vlan type to rss hash and vlan flag */
-	const vector unsigned char vlan_flags = (vector unsigned char){
+	const __vector unsigned char vlan_flags = (__vector unsigned char){
 			0, 0, 0, 0,
 			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
 			0, 0, 0, 0,
 			0, 0, 0, 0};
 
-	const vector unsigned char rss_flags = (vector unsigned char){
+	const __vector unsigned char rss_flags = (__vector unsigned char){
 			0, RTE_MBUF_F_RX_FDIR, 0, 0,
 			0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
 			0, 0, 0, 0,
 			0, 0, 0, 0};
 
-	const vector unsigned char l3_l4e_flags = (vector unsigned char){
+	const __vector unsigned char l3_l4e_flags = (__vector unsigned char){
 			0,
 			RTE_MBUF_F_RX_IP_CKSUM_BAD,
 			RTE_MBUF_F_RX_L4_CKSUM_BAD,
@@ -139,23 +139,23 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
 					     | RTE_MBUF_F_RX_IP_CKSUM_BAD,
 			0, 0, 0, 0, 0, 0, 0, 0};
 
-	vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
-	vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
-	vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+	vlan0 = (__vector unsigned int)vec_mergel(descs[0], descs[1]);
+	vlan1 = (__vector unsigned int)vec_mergel(descs[2], descs[3]);
+	vlan0 = (__vector unsigned int)vec_mergeh(vlan0, vlan1);
 
 	vlan1 = vec_and(vlan0, rss_vlan_msk);
-	vlan0 = (vector unsigned int)vec_perm(vlan_flags,
-					(vector unsigned char){},
-					*(vector unsigned char *)&vlan1);
+	vlan0 = (__vector unsigned int)vec_perm(vlan_flags,
+				(__vector unsigned char){},
+				*(__vector unsigned char *)&vlan1);
 
-	rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
-	rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
-					*(vector unsigned char *)&rss);
+	rss = vec_sr(vlan1, (__vector unsigned int){11, 11, 11, 11});
+	rss = (__vector unsigned int)vec_perm(rss_flags, (__vector unsigned char){},
+				*(__vector unsigned char *)&rss);
 
-	l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
-	l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
-					(vector unsigned char){},
-					*(vector unsigned char *)&l3_l4e);
+	l3_l4e = vec_sr(vlan1, (__vector unsigned int){22, 22, 22, 22});
+	l3_l4e = (__vector unsigned int)vec_perm(l3_l4e_flags,
+				(__vector unsigned char){},
+				*(__vector unsigned char *)&l3_l4e);
 
 	vlan0 = vec_or(vlan0, rss);
 	vlan0 = vec_or(vlan0, l3_l4e);
@@ -169,23 +169,23 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
 #define PKTLEN_SHIFT     10
 
 static inline void
-desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+desc_to_ptype_v(__vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
 		uint32_t *ptype_tbl)
 {
-	vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
-	vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+	__vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+	__vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
 
-	ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
-	ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+	ptype0 = vec_sr(ptype0, (__vector unsigned long){30, 30});
+	ptype1 = vec_sr(ptype1, (__vector unsigned long){30, 30});
 
 	rx_pkts[0]->packet_type =
-		ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+		ptype_tbl[(*(__vector unsigned char *)&ptype0)[0]];
 	rx_pkts[1]->packet_type =
-		ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+		ptype_tbl[(*(__vector unsigned char *)&ptype0)[8]];
 	rx_pkts[2]->packet_type =
-		ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+		ptype_tbl[(*(__vector unsigned char *)&ptype1)[0]];
 	rx_pkts[3]->packet_type =
-		ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
+		ptype_tbl[(*(__vector unsigned char *)&ptype1)[8]];
 }
 
 /**
@@ -204,17 +204,17 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts_recd;
 	int pos;
 	uint64_t var;
-	vector unsigned char shuf_msk;
+	__vector unsigned char shuf_msk;
 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
-	vector unsigned short crc_adjust = (vector unsigned short){
+	__vector unsigned short crc_adjust = (__vector unsigned short){
 		0, 0,         /* ignore pkt_type field */
 		rxq->crc_len, /* sub crc on pkt_len */
 		0,            /* ignore high-16bits of pkt_len */
 		rxq->crc_len, /* sub crc on data_len */
 		0, 0, 0       /* ignore non-length fields */
 		};
-	vector unsigned long dd_check, eop_check;
+	__vector unsigned long dd_check, eop_check;
 
 	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
@@ -240,15 +240,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		return 0;
 
 	/* 4 packets DD mask */
-	dd_check = (vector unsigned long){0x0000000100000001ULL,
+	dd_check = (__vector unsigned long){0x0000000100000001ULL,
 					  0x0000000100000001ULL};
 
 	/* 4 packets EOP mask */
-	eop_check = (vector unsigned long){0x0000000200000002ULL,
+	eop_check = (__vector unsigned long){0x0000000200000002ULL,
 					   0x0000000200000002ULL};
 
 	/* mask to shuffle from desc. to mbuf */
-	shuf_msk = (vector unsigned char){
+	shuf_msk = (__vector unsigned char){
 		0xFF, 0xFF,   /* pkt_type set as unknown */
 		0xFF, 0xFF,   /* pkt_type set as unknown */
 		14, 15,       /* octet 15~14, low 16 bits pkt_len */
@@ -274,35 +274,35 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
 			pos += RTE_I40E_DESCS_PER_LOOP,
 			rxdp += RTE_I40E_DESCS_PER_LOOP) {
-		vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
-		vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
-		vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
-		vector unsigned long mbp1, mbp2; /* two mbuf pointer
+		__vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+		__vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		__vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+		__vector unsigned long mbp1, mbp2; /* two mbuf pointer
 						  * in one XMM reg.
 						  */
 
 		/* B.1 load 2 mbuf point */
-		mbp1 = *(vector unsigned long *)&sw_ring[pos];
+		mbp1 = *(__vector unsigned long *)&sw_ring[pos];
 		/* Read desc statuses backwards to avoid race condition */
 		/* A.1 load desc[3] */
-		descs[3] = *(vector unsigned long *)(rxdp + 3);
+		descs[3] = *(__vector unsigned long *)(rxdp + 3);
 		rte_compiler_barrier();
 
 		/* B.2 copy 2 mbuf point into rx_pkts  */
-		*(vector unsigned long *)&rx_pkts[pos] = mbp1;
+		*(__vector unsigned long *)&rx_pkts[pos] = mbp1;
 
 		/* B.1 load 2 mbuf point */
-		mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+		mbp2 = *(__vector unsigned long *)&sw_ring[pos + 2];
 
 		/* A.1 load desc[2-0] */
-		descs[2] = *(vector unsigned long *)(rxdp + 2);
+		descs[2] = *(__vector unsigned long *)(rxdp + 2);
 		rte_compiler_barrier();
-		descs[1] = *(vector unsigned long *)(rxdp + 1);
+		descs[1] = *(__vector unsigned long *)(rxdp + 1);
 		rte_compiler_barrier();
-		descs[0] = *(vector unsigned long *)(rxdp);
+		descs[0] = *(__vector unsigned long *)(rxdp);
 
 		/* B.2 copy 2 mbuf point into rx_pkts  */
-		*(vector unsigned long *)&rx_pkts[pos + 2] =  mbp2;
+		*(__vector unsigned long *)&rx_pkts[pos + 2] =  mbp2;
 
 		if (split_packet) {
 			rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -315,78 +315,78 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		rte_compiler_barrier();
 
 		/* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
-		const vector unsigned int len3 = vec_sl(
-			vec_ld(0, (vector unsigned int *)&descs[3]),
-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+		const __vector unsigned int len3 = vec_sl(
+			vec_ld(0, (__vector unsigned int *)&descs[3]),
+			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
 
-		const vector unsigned int len2 = vec_sl(
-			vec_ld(0, (vector unsigned int *)&descs[2]),
-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+		const __vector unsigned int len2 = vec_sl(
+			vec_ld(0, (__vector unsigned int *)&descs[2]),
+			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
 
 		/* merge the now-aligned packet length fields back in */
-		descs[3] = (vector unsigned long)len3;
-		descs[2] = (vector unsigned long)len2;
+		descs[3] = (__vector unsigned long)len3;
+		descs[2] = (__vector unsigned long)len2;
 
 		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
-		pkt_mb4 = vec_perm((vector unsigned char)descs[3],
-				  (vector unsigned char){}, shuf_msk);
-		pkt_mb3 = vec_perm((vector unsigned char)descs[2],
-				  (vector unsigned char){}, shuf_msk);
+		pkt_mb4 = vec_perm((__vector unsigned char)descs[3],
+				  (__vector unsigned char){}, shuf_msk);
+		pkt_mb3 = vec_perm((__vector unsigned char)descs[2],
+				  (__vector unsigned char){}, shuf_msk);
 
 		/* C.1 4=>2 filter staterr info only */
-		sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
-					(vector unsigned short)descs[2]);
+		sterr_tmp2 = vec_mergel((__vector unsigned short)descs[3],
+					(__vector unsigned short)descs[2]);
 		/* C.1 4=>2 filter staterr info only */
-		sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
-					(vector unsigned short)descs[0]);
+		sterr_tmp1 = vec_mergel((__vector unsigned short)descs[1],
+					(__vector unsigned short)descs[0]);
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
-		pkt_mb4 = (vector unsigned char)vec_sub(
-				(vector unsigned short)pkt_mb4, crc_adjust);
-		pkt_mb3 = (vector unsigned char)vec_sub(
-				(vector unsigned short)pkt_mb3, crc_adjust);
+		pkt_mb4 = (__vector unsigned char)vec_sub(
+				(__vector unsigned short)pkt_mb4, crc_adjust);
+		pkt_mb3 = (__vector unsigned char)vec_sub(
+				(__vector unsigned short)pkt_mb3, crc_adjust);
 
 		/* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
-		const vector unsigned int len1 = vec_sl(
-			vec_ld(0, (vector unsigned int *)&descs[1]),
-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
-		const vector unsigned int len0 = vec_sl(
-			vec_ld(0, (vector unsigned int *)&descs[0]),
-			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+		const __vector unsigned int len1 = vec_sl(
+			vec_ld(0, (__vector unsigned int *)&descs[1]),
+			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+		const __vector unsigned int len0 = vec_sl(
+			vec_ld(0, (__vector unsigned int *)&descs[0]),
+			(__vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
 
 		/* merge the now-aligned packet length fields back in */
-		descs[1] = (vector unsigned long)len1;
-		descs[0] = (vector unsigned long)len0;
+		descs[1] = (__vector unsigned long)len1;
+		descs[0] = (__vector unsigned long)len0;
 
 		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
-		pkt_mb2 = vec_perm((vector unsigned char)descs[1],
-				   (vector unsigned char){}, shuf_msk);
-		pkt_mb1 = vec_perm((vector unsigned char)descs[0],
-				   (vector unsigned char){}, shuf_msk);
+		pkt_mb2 = vec_perm((__vector unsigned char)descs[1],
+				(__vector unsigned char){}, shuf_msk);
+		pkt_mb1 = vec_perm((__vector unsigned char)descs[0],
+				(__vector unsigned char){}, shuf_msk);
 
 		/* C.2 get 4 pkts staterr value  */
-		staterr = (vector unsigned short)vec_mergeh(
+		staterr = (__vector unsigned short)vec_mergeh(
 				sterr_tmp1, sterr_tmp2);
 
 		/* D.3 copy final 3,4 data to rx_pkts */
 		vec_st(pkt_mb4, 0,
-		 (vector unsigned char *)&rx_pkts[pos + 3]
+		 (__vector unsigned char *)&rx_pkts[pos + 3]
 			->rx_descriptor_fields1
 		);
 		vec_st(pkt_mb3, 0,
-		 (vector unsigned char *)&rx_pkts[pos + 2]
+		 (__vector unsigned char *)&rx_pkts[pos + 2]
 			->rx_descriptor_fields1
 		);
 
 		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
-		pkt_mb2 = (vector unsigned char)vec_sub(
-				(vector unsigned short)pkt_mb2, crc_adjust);
-		pkt_mb1 = (vector unsigned char)vec_sub(
-				(vector unsigned short)pkt_mb1,	crc_adjust);
+		pkt_mb2 = (__vector unsigned char)vec_sub(
+				(__vector unsigned short)pkt_mb2, crc_adjust);
+		pkt_mb1 = (__vector unsigned char)vec_sub(
+				(__vector unsigned short)pkt_mb1,	crc_adjust);
 
 		/* C* extract and record EOP bit */
 		if (split_packet) {
-			vector unsigned char eop_shuf_mask =
-				(vector unsigned char){
+			__vector unsigned char eop_shuf_mask =
+				(__vector unsigned char){
 					0xFF, 0xFF, 0xFF, 0xFF,
 					0xFF, 0xFF, 0xFF, 0xFF,
 					0xFF, 0xFF, 0xFF, 0xFF,
@@ -394,19 +394,19 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 				};
 
 			/* and with mask to extract bits, flipping 1-0 */
-			vector unsigned char eop_bits = vec_and(
-				(vector unsigned char)vec_nor(staterr, staterr),
-				(vector unsigned char)eop_check);
+			__vector unsigned char eop_bits = vec_and(
+				(__vector unsigned char)vec_nor(staterr, staterr),
+				(__vector unsigned char)eop_check);
 			/* the staterr values are not in order, as the count
 			 * of dd bits doesn't care. However, for end of
 			 * packet tracking, we do care, so shuffle. This also
 			 * compresses the 32-bit values to 8-bit
 			 */
-			eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+			eop_bits = vec_perm(eop_bits, (__vector unsigned char){},
 					    eop_shuf_mask);
 			/* store the resulting 32-bit value */
 			*split_packet = (vec_ld(0,
-					 (vector unsigned int *)&eop_bits))[0];
+					 (__vector unsigned int *)&eop_bits))[0];
 			split_packet += RTE_I40E_DESCS_PER_LOOP;
 
 			/* zero-out next pointers */
@@ -417,22 +417,22 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		}
 
 		/* C.3 calc available number of desc */
-		staterr = vec_and(staterr, (vector unsigned short)dd_check);
+		staterr = vec_and(staterr, (__vector unsigned short)dd_check);
 
 		/* D.3 copy final 1,2 data to rx_pkts */
 		vec_st(pkt_mb2, 0,
-		 (vector unsigned char *)&rx_pkts[pos + 1]
+		 (__vector unsigned char *)&rx_pkts[pos + 1]
 			->rx_descriptor_fields1
 		);
 		vec_st(pkt_mb1, 0,
-		 (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+		 (__vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
 		);
 		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		desc_to_olflags_v(descs, &rx_pkts[pos]);
 
 		/* C.4 calc available number of desc */
 		var = __builtin_popcountll((vec_ld(0,
-			(vector unsigned long *)&staterr)[0]));
+			(__vector unsigned long *)&staterr)[0]));
 		nb_pkts_recd += var;
 		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
 			break;
@@ -533,9 +533,9 @@ vtx1(volatile struct i40e_tx_desc *txdp,
 		((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
 		((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
 
-	vector unsigned long descriptor = (vector unsigned long){
+	__vector unsigned long descriptor = (__vector unsigned long){
 		pkt->buf_iova + pkt->data_off, high_qw};
-	*(vector unsigned long *)txdp = descriptor;
+	*(__vector unsigned long *)txdp = descriptor;
 }
 
 static inline void
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index f6e434c165..683a8f9a6c 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -47,11 +47,11 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
 	uint16_t p = n & -2;
 
 	for (pos = 0; pos < p; pos += 2) {
-		vector unsigned char mbp;
+		__vector unsigned char mbp;
 
-		mbp = (vector unsigned char)vec_vsx_ld(0,
+		mbp = (__vector unsigned char)vec_vsx_ld(0,
 				(signed int const *)&elts[pos]);
-		*(vector unsigned char *)&pkts[pos] = mbp;
+		*(__vector unsigned char *)&pkts[pos] = mbp;
 	}
 	if (n & 1)
 		pkts[pos] = elts[pos];
@@ -78,15 +78,15 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 {
 	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
 	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
-	const vector unsigned char zero = (vector unsigned char){0};
+	const __vector unsigned char zero = (__vector unsigned char){0};
 	/* Mask to shuffle from extracted mini CQE to mbuf. */
-	const vector unsigned char shuf_mask1 = (vector unsigned char){
+	const __vector unsigned char shuf_mask1 = (__vector unsigned char){
 			-1, -1, -1, -1,   /* skip packet_type */
 			 7,  6, -1, -1,   /* bswap16, pkt_len */
 			 7,  6,           /* bswap16, data_len */
 			-1, -1,           /* skip vlan_tci */
 			 3,  2,  1,  0};  /* bswap32, rss */
-	const vector unsigned char shuf_mask2 = (vector unsigned char){
+	const __vector unsigned char shuf_mask2 = (__vector unsigned char){
 			-1, -1, -1, -1,   /* skip packet_type */
 			15, 14, -1, -1,   /* bswap16, pkt_len */
 			15, 14,           /* data_len, bswap16 */
@@ -95,30 +95,30 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 	/* Restore the compressed count. Must be 16 bits. */
 	const uint16_t mcqe_n = t_pkt->data_len +
 		(rxq->crc_present * RTE_ETHER_CRC_LEN);
-	const vector unsigned char rearm =
-		(vector unsigned char)vec_vsx_ld(0,
+	const __vector unsigned char rearm =
+		(__vector unsigned char)vec_vsx_ld(0,
 		(signed int const *)&t_pkt->rearm_data);
-	const vector unsigned char rxdf =
-		(vector unsigned char)vec_vsx_ld(0,
+	const __vector unsigned char rxdf =
+		(__vector unsigned char)vec_vsx_ld(0,
 		(signed int const *)&t_pkt->rx_descriptor_fields1);
-	const vector unsigned char crc_adj =
-		(vector unsigned char)(vector unsigned short){
+	const __vector unsigned char crc_adj =
+		(__vector unsigned char)(__vector unsigned short){
 			0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
 			rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0};
-	const vector unsigned short rxdf_sel_mask =
-		(vector unsigned short){
+	const __vector unsigned short rxdf_sel_mask =
+		(__vector unsigned short){
 			0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0};
-	vector unsigned char ol_flags = (vector unsigned char){0};
-	vector unsigned char ol_flags_mask = (vector unsigned char){0};
+	__vector unsigned char ol_flags = (__vector unsigned char){0};
+	__vector unsigned char ol_flags_mask = (__vector unsigned char){0};
 	unsigned int pos;
 	unsigned int i;
 	unsigned int inv = 0;
 
 #ifdef MLX5_PMD_SOFT_COUNTERS
-	const vector unsigned char ones = vec_splat_u8(-1);
+	const __vector unsigned char ones = vec_splat_u8(-1);
 	uint32_t rcvd_byte = 0;
 	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
-	const vector unsigned char len_shuf_mask = (vector unsigned char){
+	const __vector unsigned char len_shuf_mask = (__vector unsigned char){
 		 3,  2, 11, 10,
 		 7,  6, 15, 14,
 		-1, -1, -1, -1,
@@ -133,125 +133,125 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 	 * E. store flow tag (rte_flow mark).
 	 */
 	for (pos = 0; pos < mcqe_n; ) {
-		vector unsigned char mcqe1, mcqe2;
-		vector unsigned char rxdf1, rxdf2;
+		__vector unsigned char mcqe1, mcqe2;
+		__vector unsigned char rxdf1, rxdf2;
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		const vector unsigned short mcqe_sel_mask =
-			(vector unsigned short){0, 0, 0xffff, 0xffff,
+		const __vector unsigned short mcqe_sel_mask =
+			(__vector unsigned short){0, 0, 0xffff, 0xffff,
 			0, 0, 0xfff, 0xffff};
-		const vector unsigned char lower_half = {
+		const __vector unsigned char lower_half = {
 			0, 1, 4, 5, 8, 9, 12, 13, 16,
 			17, 20, 21, 24, 25, 28, 29};
-		const vector unsigned char upper_half = {
+		const __vector unsigned char upper_half = {
 			2, 3, 6, 7, 10, 11, 14, 15,
 			18, 19, 22, 23, 26, 27, 30, 31};
-		vector unsigned short left, right;
-		vector unsigned char byte_cnt, invalid_mask;
-		vector unsigned long lshift;
+		__vector unsigned short left, right;
+		__vector unsigned char byte_cnt, invalid_mask;
+		__vector unsigned long lshift;
 		__attribute__((altivec(vector__)))
 			__attribute__((altivec(bool__)))
 			unsigned long long shmask;
-		const vector unsigned long shmax = {64, 64};
+		const __vector unsigned long shmax = {64, 64};
 #endif
 
 		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
 			if (likely(pos + i < mcqe_n))
 				rte_prefetch0((void *)(cq + pos + i));
 		/* A.1 load mCQEs into a 128bit register. */
-		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
+		mcqe1 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&mcq[pos % 8]);
-		mcqe2 = (vector unsigned char)vec_vsx_ld(0,
+		mcqe2 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&mcq[pos % 8 + 2]);
 
 		/* B.1 store rearm data to mbuf. */
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos]->rearm_data = rearm;
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos + 1]->rearm_data = rearm;
 
 		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
 		rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
 		rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
-		rxdf1 = (vector unsigned char)
-			((vector unsigned short)rxdf1 -
-			(vector unsigned short)crc_adj);
-		rxdf2 = (vector unsigned char)
-			((vector unsigned short)rxdf2 -
-			(vector unsigned short)crc_adj);
-		rxdf1 = (vector unsigned char)
-			vec_sel((vector unsigned short)rxdf1,
-			(vector unsigned short)rxdf, rxdf_sel_mask);
-		rxdf2 = (vector unsigned char)
-			vec_sel((vector unsigned short)rxdf2,
-			(vector unsigned short)rxdf, rxdf_sel_mask);
+		rxdf1 = (__vector unsigned char)
+			((__vector unsigned short)rxdf1 -
+			(__vector unsigned short)crc_adj);
+		rxdf2 = (__vector unsigned char)
+			((__vector unsigned short)rxdf2 -
+			(__vector unsigned short)crc_adj);
+		rxdf1 = (__vector unsigned char)
+			vec_sel((__vector unsigned short)rxdf1,
+			(__vector unsigned short)rxdf, rxdf_sel_mask);
+		rxdf2 = (__vector unsigned char)
+			vec_sel((__vector unsigned short)rxdf2,
+			(__vector unsigned short)rxdf, rxdf_sel_mask);
 
 		/* D.1 store rx_descriptor_fields1. */
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos]->rx_descriptor_fields1 = rxdf1;
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos + 1]->rx_descriptor_fields1 = rxdf2;
 
 		/* B.1 store rearm data to mbuf. */
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos + 2]->rearm_data = rearm;
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos + 3]->rearm_data = rearm;
 
 		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
 		rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
 		rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
-		rxdf1 = (vector unsigned char)
-			((vector unsigned short)rxdf1 -
-			(vector unsigned short)crc_adj);
-		rxdf2 = (vector unsigned char)
-			((vector unsigned short)rxdf2 -
-			(vector unsigned short)crc_adj);
-		rxdf1 = (vector unsigned char)
-			vec_sel((vector unsigned short)rxdf1,
-			(vector unsigned short)rxdf, rxdf_sel_mask);
-		rxdf2 = (vector unsigned char)
-			vec_sel((vector unsigned short)rxdf2,
-			(vector unsigned short)rxdf, rxdf_sel_mask);
+		rxdf1 = (__vector unsigned char)
+			((__vector unsigned short)rxdf1 -
+			(__vector unsigned short)crc_adj);
+		rxdf2 = (__vector unsigned char)
+			((__vector unsigned short)rxdf2 -
+			(__vector unsigned short)crc_adj);
+		rxdf1 = (__vector unsigned char)
+			vec_sel((__vector unsigned short)rxdf1,
+			(__vector unsigned short)rxdf, rxdf_sel_mask);
+		rxdf2 = (__vector unsigned char)
+			vec_sel((__vector unsigned short)rxdf2,
+			(__vector unsigned short)rxdf, rxdf_sel_mask);
 
 		/* D.1 store rx_descriptor_fields1. */
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&elts[pos + 3]->rx_descriptor_fields1 = rxdf2;
 
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		invalid_mask = (vector unsigned char)(vector unsigned long){
+		invalid_mask = (__vector unsigned char)(__vector unsigned long){
 			(mcqe_n - pos) * sizeof(uint16_t) * 8, 0};
 
 		lshift =
-			vec_splat((vector unsigned long)invalid_mask, 0);
+			vec_splat((__vector unsigned long)invalid_mask, 0);
 		shmask = vec_cmpgt(shmax, lshift);
-		invalid_mask = (vector unsigned char)
-			vec_sl((vector unsigned long)ones, lshift);
-		invalid_mask = (vector unsigned char)
-			vec_sel((vector unsigned long)shmask,
-			(vector unsigned long)invalid_mask, shmask);
+		invalid_mask = (__vector unsigned char)
+			vec_sl((__vector unsigned long)ones, lshift);
+		invalid_mask = (__vector unsigned char)
+			vec_sel((__vector unsigned long)shmask,
+			(__vector unsigned long)invalid_mask, shmask);
 
-		byte_cnt = (vector unsigned char)
-			vec_sel((vector unsigned short)
-			vec_sro((vector unsigned short)mcqe1,
-			(vector unsigned char){32}),
-			(vector unsigned short)mcqe2, mcqe_sel_mask);
+		byte_cnt = (__vector unsigned char)
+			vec_sel((__vector unsigned short)
+			vec_sro((__vector unsigned short)mcqe1,
+			(__vector unsigned char){32}),
+			(__vector unsigned short)mcqe2, mcqe_sel_mask);
 		byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask);
-		byte_cnt = (vector unsigned char)
-			vec_andc((vector unsigned long)byte_cnt,
-			(vector unsigned long)invalid_mask);
-		left = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, lower_half);
-		right = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, upper_half);
-		byte_cnt = (vector unsigned char)vec_add(left, right);
-		left = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, lower_half);
-		right = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, upper_half);
-		byte_cnt = (vector unsigned char)vec_add(left, right);
-		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
+		byte_cnt = (__vector unsigned char)
+			vec_andc((__vector unsigned long)byte_cnt,
+			(__vector unsigned long)invalid_mask);
+		left = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, lower_half);
+		right = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, upper_half);
+		byte_cnt = (__vector unsigned char)vec_add(left, right);
+		left = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, lower_half);
+		right = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, upper_half);
+		byte_cnt = (__vector unsigned char)vec_add(left, right);
+		rcvd_byte += ((__vector unsigned long)byte_cnt)[0];
 #endif
 
 		if (rxq->mark) {
@@ -265,99 +265,99 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 				elts[pos + 2]->hash.fdir.hi = flow_tag;
 				elts[pos + 3]->hash.fdir.hi = flow_tag;
 			} else {
-				const vector unsigned char flow_mark_adj =
-					(vector unsigned char)
-					(vector unsigned int){
+				const __vector unsigned char flow_mark_adj =
+					(__vector unsigned char)
+					(__vector unsigned int){
 					-1, -1, -1, -1};
-				const vector unsigned char flow_mark_shuf =
-					(vector unsigned char){
+				const __vector unsigned char flow_mark_shuf =
+					(__vector unsigned char){
 					-1, -1, -1, -1,
 					-1, -1, -1, -1,
 					12,  8,  9, -1,
 					 4,  0,  1,  -1};
-				const vector unsigned char ft_mask =
-					(vector unsigned char)
-					(vector unsigned int){
+				const __vector unsigned char ft_mask =
+					(__vector unsigned char)
+					(__vector unsigned int){
 					0xffffff00, 0xffffff00,
 					0xffffff00, 0xffffff00};
-				const vector unsigned char fdir_flags =
-					(vector unsigned char)
-					(vector unsigned int){
+				const __vector unsigned char fdir_flags =
+					(__vector unsigned char)
+					(__vector unsigned int){
 					RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR,
 					RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR};
-				const vector unsigned char fdir_all_flags =
-					(vector unsigned char)
-					(vector unsigned int){
+				const __vector unsigned char fdir_all_flags =
+					(__vector unsigned char)
+					(__vector unsigned int){
 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID,
 					RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID};
-				vector unsigned char fdir_id_flags =
-					(vector unsigned char)
-					(vector unsigned int){
+				__vector unsigned char fdir_id_flags =
+					(__vector unsigned char)
+					(__vector unsigned int){
 					RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID,
 					RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID};
 				/* Extract flow_tag field. */
-				vector unsigned char ftag0 = vec_perm(mcqe1,
+				__vector unsigned char ftag0 = vec_perm(mcqe1,
 							zero, flow_mark_shuf);
-				vector unsigned char ftag1 = vec_perm(mcqe2,
+				__vector unsigned char ftag1 = vec_perm(mcqe2,
 							zero, flow_mark_shuf);
-				vector unsigned char ftag =
-					(vector unsigned char)
-					vec_mergel((vector unsigned int)ftag0,
-					(vector unsigned int)ftag1);
-				vector unsigned char invalid_mask =
-					(vector unsigned char)
-					vec_cmpeq((vector unsigned int)ftag,
-					(vector unsigned int)zero);
+				__vector unsigned char ftag =
+					(__vector unsigned char)
+					vec_mergel((__vector unsigned int)ftag0,
+					(__vector unsigned int)ftag1);
+				__vector unsigned char invalid_mask =
+					(__vector unsigned char)
+					vec_cmpeq((__vector unsigned int)ftag,
+					(__vector unsigned int)zero);
 
-				ol_flags_mask = (vector unsigned char)
-					vec_or((vector unsigned long)
+				ol_flags_mask = (__vector unsigned char)
+					vec_or((__vector unsigned long)
 					ol_flags_mask,
-					(vector unsigned long)fdir_all_flags);
+					(__vector unsigned long)fdir_all_flags);
 
 				/* Set RTE_MBUF_F_RX_FDIR if flow tag is non-zero. */
-				invalid_mask = (vector unsigned char)
-					vec_cmpeq((vector unsigned int)ftag,
-					(vector unsigned int)zero);
-				ol_flags = (vector unsigned char)
-					vec_or((vector unsigned long)ol_flags,
-					(vector unsigned long)
-					vec_andc((vector unsigned long)
+				invalid_mask = (__vector unsigned char)
+					vec_cmpeq((__vector unsigned int)ftag,
+					(__vector unsigned int)zero);
+				ol_flags = (__vector unsigned char)
+					vec_or((__vector unsigned long)ol_flags,
+					(__vector unsigned long)
+					vec_andc((__vector unsigned long)
 					fdir_flags,
-					(vector unsigned long)invalid_mask));
-				ol_flags_mask = (vector unsigned char)
-					vec_or((vector unsigned long)
+					(__vector unsigned long)invalid_mask));
+				ol_flags_mask = (__vector unsigned char)
+					vec_or((__vector unsigned long)
 					ol_flags_mask,
-					(vector unsigned long)fdir_flags);
+					(__vector unsigned long)fdir_flags);
 
 				/* Mask out invalid entries. */
-				fdir_id_flags = (vector unsigned char)
-					vec_andc((vector unsigned long)
+				fdir_id_flags = (__vector unsigned char)
+					vec_andc((__vector unsigned long)
 					fdir_id_flags,
-					(vector unsigned long)invalid_mask);
+					(__vector unsigned long)invalid_mask);
 
 				/* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
-				ol_flags = (vector unsigned char)
-					vec_or((vector unsigned long)ol_flags,
-					(vector unsigned long)
-					vec_andc((vector unsigned long)
+				ol_flags = (__vector unsigned char)
+					vec_or((__vector unsigned long)ol_flags,
+					(__vector unsigned long)
+					vec_andc((__vector unsigned long)
 					fdir_id_flags,
-					(vector unsigned long)
-					vec_cmpeq((vector unsigned int)ftag,
-					(vector unsigned int)ft_mask)));
+					(__vector unsigned long)
+					vec_cmpeq((__vector unsigned int)ftag,
+					(__vector unsigned int)ft_mask)));
 
-				ftag = (vector unsigned char)
-					((vector unsigned int)ftag +
-					(vector unsigned int)flow_mark_adj);
+				ftag = (__vector unsigned char)
+					((__vector unsigned int)ftag +
+					(__vector unsigned int)flow_mark_adj);
 				elts[pos]->hash.fdir.hi =
-					((vector unsigned int)ftag)[0];
+					((__vector unsigned int)ftag)[0];
 				elts[pos + 1]->hash.fdir.hi =
-					((vector unsigned int)ftag)[1];
+					((__vector unsigned int)ftag)[1];
 				elts[pos + 2]->hash.fdir.hi =
-					((vector unsigned int)ftag)[2];
+					((__vector unsigned int)ftag)[2];
 				elts[pos + 3]->hash.fdir.hi =
-					((vector unsigned int)ftag)[3];
+					((__vector unsigned int)ftag)[3];
 			}
 		}
 		if (unlikely(rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)) {
@@ -373,37 +373,37 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 					mcq[pos % 8 + 2].hdr_type;
 				const uint8_t pkt_hdr3 =
 					mcq[pos % 8 + 3].hdr_type;
-				const vector unsigned char vlan_mask =
-					(vector unsigned char)
-					(vector unsigned int) {
+				const __vector unsigned char vlan_mask =
+					(__vector unsigned char)
+					(__vector unsigned int) {
 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
 					(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED)};
-				const vector unsigned char cv_mask =
-					(vector unsigned char)
-					(vector unsigned int) {
+				const __vector unsigned char cv_mask =
+					(__vector unsigned char)
+					(__vector unsigned int) {
 					MLX5_CQE_VLAN_STRIPPED,
 					MLX5_CQE_VLAN_STRIPPED,
 					MLX5_CQE_VLAN_STRIPPED,
 					MLX5_CQE_VLAN_STRIPPED};
-				vector unsigned char pkt_cv =
-					(vector unsigned char)
-					(vector unsigned int) {
+				__vector unsigned char pkt_cv =
+					(__vector unsigned char)
+					(__vector unsigned int) {
 					pkt_hdr0 & 0x1, pkt_hdr1 & 0x1,
 					pkt_hdr2 & 0x1, pkt_hdr3 & 0x1};
 
-				ol_flags_mask = (vector unsigned char)
-					vec_or((vector unsigned long)
+				ol_flags_mask = (__vector unsigned char)
+					vec_or((__vector unsigned long)
 					ol_flags_mask,
-					(vector unsigned long)vlan_mask);
-				ol_flags = (vector unsigned char)
-					vec_or((vector unsigned long)ol_flags,
-					(vector unsigned long)
-					vec_and((vector unsigned long)vlan_mask,
-					(vector unsigned long)
-					vec_cmpeq((vector unsigned int)pkt_cv,
-					(vector unsigned int)cv_mask)));
+					(__vector unsigned long)vlan_mask);
+				ol_flags = (__vector unsigned char)
+					vec_or((__vector unsigned long)ol_flags,
+					(__vector unsigned long)
+					vec_and((__vector unsigned long)vlan_mask,
+					(__vector unsigned long)
+					vec_cmpeq((__vector unsigned int)pkt_cv,
+					(__vector unsigned int)cv_mask)));
 				elts[pos]->packet_type =
 					mlx5_ptype_table[(pkt_hdr0 >> 2) |
 							 pkt_info];
@@ -431,36 +431,36 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 						pkt_info) & (1 << 6));
 				}
 			}
-			const vector unsigned char hash_mask =
-				(vector unsigned char)(vector unsigned int) {
+			const __vector unsigned char hash_mask =
+				(__vector unsigned char)(__vector unsigned int) {
 					RTE_MBUF_F_RX_RSS_HASH,
 					RTE_MBUF_F_RX_RSS_HASH,
 					RTE_MBUF_F_RX_RSS_HASH,
 					RTE_MBUF_F_RX_RSS_HASH};
-			const vector unsigned char rearm_flags =
-				(vector unsigned char)(vector unsigned int) {
+			const __vector unsigned char rearm_flags =
+				(__vector unsigned char)(__vector unsigned int) {
 				(uint32_t)t_pkt->ol_flags,
 				(uint32_t)t_pkt->ol_flags,
 				(uint32_t)t_pkt->ol_flags,
 				(uint32_t)t_pkt->ol_flags};
 
-			ol_flags_mask = (vector unsigned char)
-				vec_or((vector unsigned long)ol_flags_mask,
-				(vector unsigned long)hash_mask);
-			ol_flags = (vector unsigned char)
-				vec_or((vector unsigned long)ol_flags,
-				(vector unsigned long)
-				vec_andc((vector unsigned long)rearm_flags,
-				(vector unsigned long)ol_flags_mask));
+			ol_flags_mask = (__vector unsigned char)
+				vec_or((__vector unsigned long)ol_flags_mask,
+				(__vector unsigned long)hash_mask);
+			ol_flags = (__vector unsigned char)
+				vec_or((__vector unsigned long)ol_flags,
+				(__vector unsigned long)
+				vec_andc((__vector unsigned long)rearm_flags,
+				(__vector unsigned long)ol_flags_mask));
 
 			elts[pos]->ol_flags =
-				((vector unsigned int)ol_flags)[0];
+				((__vector unsigned int)ol_flags)[0];
 			elts[pos + 1]->ol_flags =
-				((vector unsigned int)ol_flags)[1];
+				((__vector unsigned int)ol_flags)[1];
 			elts[pos + 2]->ol_flags =
-				((vector unsigned int)ol_flags)[2];
+				((__vector unsigned int)ol_flags)[2];
 			elts[pos + 3]->ol_flags =
-				((vector unsigned int)ol_flags)[3];
+				((__vector unsigned int)ol_flags)[3];
 			elts[pos]->hash.rss = 0;
 			elts[pos + 1]->hash.rss = 0;
 			elts[pos + 2]->hash.rss = 0;
@@ -524,13 +524,13 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
  */
 static inline void
 rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
-		vector unsigned char cqes[4], vector unsigned char op_err,
+		__vector unsigned char cqes[4], __vector unsigned char op_err,
 		struct rte_mbuf **pkts)
 {
-	vector unsigned char pinfo0, pinfo1;
-	vector unsigned char pinfo, ptype;
-	vector unsigned char ol_flags = (vector unsigned char)
-		(vector unsigned int){
+	__vector unsigned char pinfo0, pinfo1;
+	__vector unsigned char pinfo, ptype;
+	__vector unsigned char ol_flags = (__vector unsigned char)
+		(__vector unsigned int){
 			rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
 				rxq->hw_timestamp * rxq->timestamp_rx_flag,
 			rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
@@ -539,25 +539,25 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 				rxq->hw_timestamp * rxq->timestamp_rx_flag,
 			rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
 				rxq->hw_timestamp * rxq->timestamp_rx_flag};
-	vector unsigned char cv_flags;
-	const vector unsigned char zero = (vector unsigned char){0};
-	const vector unsigned char ptype_mask =
-		(vector unsigned char)(vector unsigned int){
+	__vector unsigned char cv_flags;
+	const __vector unsigned char zero = (__vector unsigned char){0};
+	const __vector unsigned char ptype_mask =
+		(__vector unsigned char)(__vector unsigned int){
 		0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06};
-	const vector unsigned char ptype_ol_mask =
-		(vector unsigned char)(vector unsigned int){
+	const __vector unsigned char ptype_ol_mask =
+		(__vector unsigned char)(__vector unsigned int){
 		0x00000106, 0x00000106, 0x00000106, 0x00000106};
-	const vector unsigned char pinfo_mask =
-		(vector unsigned char)(vector unsigned int){
+	const __vector unsigned char pinfo_mask =
+		(__vector unsigned char)(__vector unsigned int){
 		0x00000003, 0x00000003, 0x00000003, 0x00000003};
-	const vector unsigned char cv_flag_sel = (vector unsigned char){
+	const __vector unsigned char cv_flag_sel = (__vector unsigned char){
 		0, (uint8_t)(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
 		(uint8_t)(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1), 0,
 		(uint8_t)(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1), 0,
 		(uint8_t)((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
 		0, 0, 0, 0, 0, 0, 0, 0, 0};
-	const vector unsigned char cv_mask =
-		(vector unsigned char)(vector unsigned int){
+	const __vector unsigned char cv_mask =
+		(__vector unsigned char)(__vector unsigned int){
 		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
 		RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
 		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
@@ -566,77 +566,77 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 		RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
 		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
 		RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED};
-	const vector unsigned char mbuf_init =
-		(vector unsigned char)vec_vsx_ld
-			(0, (vector unsigned char *)&rxq->mbuf_initializer);
-	const vector unsigned short rearm_sel_mask =
-		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
-	vector unsigned char rearm0, rearm1, rearm2, rearm3;
+	const __vector unsigned char mbuf_init =
+		(__vector unsigned char)vec_vsx_ld
+			(0, (__vector unsigned char *)&rxq->mbuf_initializer);
+	const __vector unsigned short rearm_sel_mask =
+		(__vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
+	__vector unsigned char rearm0, rearm1, rearm2, rearm3;
 	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
 
 	/* Extract pkt_info field. */
-	pinfo0 = (vector unsigned char)
-		vec_mergeh((vector unsigned int)cqes[0],
-		(vector unsigned int)cqes[1]);
-	pinfo1 = (vector unsigned char)
-		vec_mergeh((vector unsigned int)cqes[2],
-		(vector unsigned int)cqes[3]);
-	pinfo = (vector unsigned char)
-		vec_mergeh((vector unsigned long)pinfo0,
-		(vector unsigned long)pinfo1);
+	pinfo0 = (__vector unsigned char)
+		vec_mergeh((__vector unsigned int)cqes[0],
+		(__vector unsigned int)cqes[1]);
+	pinfo1 = (__vector unsigned char)
+		vec_mergeh((__vector unsigned int)cqes[2],
+		(__vector unsigned int)cqes[3]);
+	pinfo = (__vector unsigned char)
+		vec_mergeh((__vector unsigned long)pinfo0,
+		(__vector unsigned long)pinfo1);
 
 	/* Extract hdr_type_etc field. */
-	pinfo0 = (vector unsigned char)
-		vec_mergel((vector unsigned int)cqes[0],
-		(vector unsigned int)cqes[1]);
-	pinfo1 = (vector unsigned char)
-		vec_mergel((vector unsigned int)cqes[2],
-		(vector unsigned int)cqes[3]);
-	ptype = (vector unsigned char)
-		vec_mergeh((vector unsigned long)pinfo0,
-		(vector unsigned long)pinfo1);
+	pinfo0 = (__vector unsigned char)
+		vec_mergel((__vector unsigned int)cqes[0],
+		(__vector unsigned int)cqes[1]);
+	pinfo1 = (__vector unsigned char)
+		vec_mergel((__vector unsigned int)cqes[2],
+		(__vector unsigned int)cqes[3]);
+	ptype = (__vector unsigned char)
+		vec_mergeh((__vector unsigned long)pinfo0,
+		(__vector unsigned long)pinfo1);
 
 	if (rxq->mark) {
-		const vector unsigned char pinfo_ft_mask =
-			(vector unsigned char)(vector unsigned int){
+		const __vector unsigned char pinfo_ft_mask =
+			(__vector unsigned char)(__vector unsigned int){
 			0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
-		const vector unsigned char fdir_flags =
-			(vector unsigned char)(vector unsigned int){
+		const __vector unsigned char fdir_flags =
+			(__vector unsigned char)(__vector unsigned int){
 			RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR,
 			RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_FDIR};
-		vector unsigned char fdir_id_flags =
-			(vector unsigned char)(vector unsigned int){
+		__vector unsigned char fdir_id_flags =
+			(__vector unsigned char)(__vector unsigned int){
 			RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID,
 			RTE_MBUF_F_RX_FDIR_ID, RTE_MBUF_F_RX_FDIR_ID};
-		vector unsigned char flow_tag, invalid_mask;
+		__vector unsigned char flow_tag, invalid_mask;
 
-		flow_tag = (vector unsigned char)
-			vec_and((vector unsigned long)pinfo,
-			(vector unsigned long)pinfo_ft_mask);
+		flow_tag = (__vector unsigned char)
+			vec_and((__vector unsigned long)pinfo,
+			(__vector unsigned long)pinfo_ft_mask);
 
 		/* Check if flow tag is non-zero then set RTE_MBUF_F_RX_FDIR. */
-		invalid_mask = (vector unsigned char)
-			vec_cmpeq((vector unsigned int)flow_tag,
-			(vector unsigned int)zero);
-		ol_flags = (vector unsigned char)
-			vec_or((vector unsigned long)ol_flags,
-			(vector unsigned long)
-			vec_andc((vector unsigned long)fdir_flags,
-			(vector unsigned long)invalid_mask));
+		invalid_mask = (__vector unsigned char)
+			vec_cmpeq((__vector unsigned int)flow_tag,
+			(__vector unsigned int)zero);
+		ol_flags = (__vector unsigned char)
+			vec_or((__vector unsigned long)ol_flags,
+			(__vector unsigned long)
+			vec_andc((__vector unsigned long)fdir_flags,
+			(__vector unsigned long)invalid_mask));
 
 		/* Mask out invalid entries. */
-		fdir_id_flags = (vector unsigned char)
-			vec_andc((vector unsigned long)fdir_id_flags,
-			(vector unsigned long)invalid_mask);
+		fdir_id_flags = (__vector unsigned char)
+			vec_andc((__vector unsigned long)fdir_id_flags,
+			(__vector unsigned long)invalid_mask);
 
 		/* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
-		ol_flags = (vector unsigned char)
-			vec_or((vector unsigned long)ol_flags,
-			(vector unsigned long)
-			vec_andc((vector unsigned long)fdir_id_flags,
-			(vector unsigned long)
-			vec_cmpeq((vector unsigned int)flow_tag,
-			(vector unsigned int)pinfo_ft_mask)));
+		ol_flags = (__vector unsigned char)
+			vec_or((__vector unsigned long)ol_flags,
+			(__vector unsigned long)
+			vec_andc((__vector unsigned long)fdir_id_flags,
+			(__vector unsigned long)
+			vec_cmpeq((__vector unsigned int)flow_tag,
+			(__vector unsigned int)pinfo_ft_mask)));
 	}
 	/*
 	 * Merge the two fields to generate the following:
@@ -649,39 +649,39 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 	 * bit[16]    = tunneled
 	 * bit[17]    = outer_l3_type
 	 */
-	ptype = (vector unsigned char)
-		vec_and((vector unsigned long)ptype,
-		(vector unsigned long)ptype_mask);
-	pinfo = (vector unsigned char)
-		vec_and((vector unsigned long)pinfo,
-		(vector unsigned long)pinfo_mask);
-	pinfo = (vector unsigned char)
-		vec_sl((vector unsigned int)pinfo,
-		(vector unsigned int){16, 16, 16, 16});
+	ptype = (__vector unsigned char)
+		vec_and((__vector unsigned long)ptype,
+		(__vector unsigned long)ptype_mask);
+	pinfo = (__vector unsigned char)
+		vec_and((__vector unsigned long)pinfo,
+		(__vector unsigned long)pinfo_mask);
+	pinfo = (__vector unsigned char)
+		vec_sl((__vector unsigned int)pinfo,
+		(__vector unsigned int){16, 16, 16, 16});
 
 	/* Make pinfo has merged fields for ol_flags calculation. */
-	pinfo = (vector unsigned char)
-		vec_or((vector unsigned long)ptype,
-		(vector unsigned long)pinfo);
-	ptype = (vector unsigned char)
-		vec_sr((vector unsigned int)pinfo,
-		(vector unsigned int){10, 10, 10, 10});
-	ptype = (vector unsigned char)
-		vec_packs((vector unsigned int)ptype,
-		(vector unsigned int)zero);
+	pinfo = (__vector unsigned char)
+		vec_or((__vector unsigned long)ptype,
+		(__vector unsigned long)pinfo);
+	ptype = (__vector unsigned char)
+		vec_sr((__vector unsigned int)pinfo,
+		(__vector unsigned int){10, 10, 10, 10});
+	ptype = (__vector unsigned char)
+		vec_packs((__vector unsigned int)ptype,
+		(__vector unsigned int)zero);
 
 	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
-	op_err = (vector unsigned char)
-		vec_sr((vector unsigned short)op_err,
-		(vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
-	ptype = (vector unsigned char)
-		vec_or((vector unsigned long)ptype,
-		(vector unsigned long)op_err);
+	op_err = (__vector unsigned char)
+		vec_sr((__vector unsigned short)op_err,
+		(__vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
+	ptype = (__vector unsigned char)
+		vec_or((__vector unsigned long)ptype,
+		(__vector unsigned long)op_err);
 
-	pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0];
-	pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2];
-	pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4];
-	pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6];
+	pt_idx0 = (uint8_t)((__vector unsigned char)ptype)[0];
+	pt_idx1 = (uint8_t)((__vector unsigned char)ptype)[2];
+	pt_idx2 = (uint8_t)((__vector unsigned char)ptype)[4];
+	pt_idx3 = (uint8_t)((__vector unsigned char)ptype)[6];
 
 	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
 		!!(pt_idx0 & (1 << 6)) * rxq->tunnel;
@@ -693,63 +693,63 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 		!!(pt_idx3 & (1 << 6)) * rxq->tunnel;
 
 	/* Fill flags for checksum and VLAN. */
-	pinfo = (vector unsigned char)
-		vec_and((vector unsigned long)pinfo,
-		(vector unsigned long)ptype_ol_mask);
+	pinfo = (__vector unsigned char)
+		vec_and((__vector unsigned long)pinfo,
+		(__vector unsigned long)ptype_ol_mask);
 	pinfo = vec_perm(cv_flag_sel, zero, pinfo);
 
 	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
-	cv_flags = (vector unsigned char)
-		vec_sl((vector unsigned int)pinfo,
-		(vector unsigned int){9, 9, 9, 9});
-	cv_flags = (vector unsigned char)
-		vec_or((vector unsigned long)pinfo,
-		(vector unsigned long)cv_flags);
+	cv_flags = (__vector unsigned char)
+		vec_sl((__vector unsigned int)pinfo,
+		(__vector unsigned int){9, 9, 9, 9});
+	cv_flags = (__vector unsigned char)
+		vec_or((__vector unsigned long)pinfo,
+		(__vector unsigned long)cv_flags);
 
 	/* Move back flags to start from byte[0]. */
-	cv_flags = (vector unsigned char)
-		vec_sr((vector unsigned int)cv_flags,
-		(vector unsigned int){8, 8, 8, 8});
+	cv_flags = (__vector unsigned char)
+		vec_sr((__vector unsigned int)cv_flags,
+		(__vector unsigned int){8, 8, 8, 8});
 
 	/* Mask out garbage bits. */
-	cv_flags = (vector unsigned char)
-		vec_and((vector unsigned long)cv_flags,
-		(vector unsigned long)cv_mask);
+	cv_flags = (__vector unsigned char)
+		vec_and((__vector unsigned long)cv_flags,
+		(__vector unsigned long)cv_mask);
 
 	/* Merge to ol_flags. */
-	ol_flags = (vector unsigned char)
-		vec_or((vector unsigned long)ol_flags,
-		(vector unsigned long)cv_flags);
+	ol_flags = (__vector unsigned char)
+		vec_or((__vector unsigned long)ol_flags,
+		(__vector unsigned long)cv_flags);
 
 	/* Merge mbuf_init and ol_flags. */
-	rearm0 = (vector unsigned char)
-		vec_sel((vector unsigned short)mbuf_init,
-		(vector unsigned short)
-		vec_slo((vector unsigned short)ol_flags,
-		(vector unsigned char){64}), rearm_sel_mask);
-	rearm1 = (vector unsigned char)
-		vec_sel((vector unsigned short)mbuf_init,
-		(vector unsigned short)
-		vec_slo((vector unsigned short)ol_flags,
-		(vector unsigned char){32}), rearm_sel_mask);
-	rearm2 = (vector unsigned char)
-		vec_sel((vector unsigned short)mbuf_init,
-		(vector unsigned short)ol_flags, rearm_sel_mask);
-	rearm3 = (vector unsigned char)
-		vec_sel((vector unsigned short)mbuf_init,
-		(vector unsigned short)
-		vec_sro((vector unsigned short)ol_flags,
-		(vector unsigned char){32}), rearm_sel_mask);
+	rearm0 = (__vector unsigned char)
+		vec_sel((__vector unsigned short)mbuf_init,
+		(__vector unsigned short)
+		vec_slo((__vector unsigned short)ol_flags,
+		(__vector unsigned char){64}), rearm_sel_mask);
+	rearm1 = (__vector unsigned char)
+		vec_sel((__vector unsigned short)mbuf_init,
+		(__vector unsigned short)
+		vec_slo((__vector unsigned short)ol_flags,
+		(__vector unsigned char){32}), rearm_sel_mask);
+	rearm2 = (__vector unsigned char)
+		vec_sel((__vector unsigned short)mbuf_init,
+		(__vector unsigned short)ol_flags, rearm_sel_mask);
+	rearm3 = (__vector unsigned char)
+		vec_sel((__vector unsigned short)mbuf_init,
+		(__vector unsigned short)
+		vec_sro((__vector unsigned short)ol_flags,
+		(__vector unsigned char){32}), rearm_sel_mask);
 
 	/* Write 8B rearm_data and 8B ol_flags. */
 	vec_vsx_st(rearm0, 0,
-		(vector unsigned char *)&pkts[0]->rearm_data);
+		(__vector unsigned char *)&pkts[0]->rearm_data);
 	vec_vsx_st(rearm1, 0,
-		(vector unsigned char *)&pkts[1]->rearm_data);
+		(__vector unsigned char *)&pkts[1]->rearm_data);
 	vec_vsx_st(rearm2, 0,
-		(vector unsigned char *)&pkts[2]->rearm_data);
+		(__vector unsigned char *)&pkts[2]->rearm_data);
 	vec_vsx_st(rearm3, 0,
-		(vector unsigned char *)&pkts[3]->rearm_data);
+		(__vector unsigned char *)&pkts[3]->rearm_data);
 }
 
 /**
@@ -788,31 +788,31 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
 	uint16_t nocmp_n = 0;
 	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
-	const vector unsigned char zero = (vector unsigned char){0};
-	const vector unsigned char ones = vec_splat_u8(-1);
-	const vector unsigned char owner_check =
-		(vector unsigned char)(vector unsigned long){
+	const __vector unsigned char zero = (__vector unsigned char){0};
+	const __vector unsigned char ones = vec_splat_u8(-1);
+	const __vector unsigned char owner_check =
+		(__vector unsigned char)(__vector unsigned long){
 		0x0100000001000000LL, 0x0100000001000000LL};
-	const vector unsigned char opcode_check =
-		(vector unsigned char)(vector unsigned long){
+	const __vector unsigned char opcode_check =
+		(__vector unsigned char)(__vector unsigned long){
 		0xf0000000f0000000LL, 0xf0000000f0000000LL};
-	const vector unsigned char format_check =
-		(vector unsigned char)(vector unsigned long){
+	const __vector unsigned char format_check =
+		(__vector unsigned char)(__vector unsigned long){
 		0x0c0000000c000000LL, 0x0c0000000c000000LL};
-	const vector unsigned char resp_err_check =
-		(vector unsigned char)(vector unsigned long){
+	const __vector unsigned char resp_err_check =
+		(__vector unsigned char)(__vector unsigned long){
 		0xe0000000e0000000LL, 0xe0000000e0000000LL};
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint32_t rcvd_byte = 0;
 	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
-	const vector unsigned char len_shuf_mask = (vector unsigned char){
+	const __vector unsigned char len_shuf_mask = (__vector unsigned char){
 		 1,  0,  5,  4,
 		 9,  8, 13, 12,
 		-1, -1, -1, -1,
 		-1, -1, -1, -1};
 #endif
 	/* Mask to shuffle from extracted CQE to mbuf. */
-	const vector unsigned char shuf_mask = (vector unsigned char){
+	const __vector unsigned char shuf_mask = (__vector unsigned char){
 		 5,  4,           /* bswap16, pkt_len */
 		-1, -1,           /* zero out 2nd half of pkt_len */
 		 5,  4,           /* bswap16, data_len */
@@ -821,22 +821,22 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		 1,  2,  3, -1};  /* fdir.hi */
 	/* Mask to blend from the last Qword to the first DQword. */
 	/* Mask to blend from the last Qword to the first DQword. */
-	const vector unsigned char blend_mask = (vector unsigned char){
+	const __vector unsigned char blend_mask = (__vector unsigned char){
 		-1,  0,  0,  0,
 		 0,  0,  0,  0,
 		-1, -1, -1, -1,
 		-1, -1, -1, -1};
-	const vector unsigned char crc_adj =
-		(vector unsigned char)(vector unsigned short){
+	const __vector unsigned char crc_adj =
+		(__vector unsigned char)(__vector unsigned short){
 		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
 		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0};
-	const vector unsigned char flow_mark_adj =
-		(vector unsigned char)(vector unsigned int){
+	const __vector unsigned char flow_mark_adj =
+		(__vector unsigned char)(__vector unsigned int){
 		0, 0, 0, rxq->mark * (-1)};
-	const vector unsigned short cqe_sel_mask1 =
-		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
-	const vector unsigned short cqe_sel_mask2 =
-		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
+	const __vector unsigned short cqe_sel_mask1 =
+		(__vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
+	const __vector unsigned short cqe_sel_mask2 =
+		(__vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
 
 	/*
 	 * A. load first Qword (8bytes) in one loop.
@@ -861,30 +861,30 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 	for (pos = 0;
 	     pos < pkts_n;
 	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
-		vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
-		vector unsigned char cqe_tmp1, cqe_tmp2;
-		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
-		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
-		vector unsigned char opcode, owner_mask, invalid_mask;
-		vector unsigned char comp_mask;
-		vector unsigned char mask;
+		__vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
+		__vector unsigned char cqe_tmp1, cqe_tmp2;
+		__vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
+		__vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
+		__vector unsigned char opcode, owner_mask, invalid_mask;
+		__vector unsigned char comp_mask;
+		__vector unsigned char mask;
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		const vector unsigned char lower_half = {
+		const __vector unsigned char lower_half = {
 			0, 1, 4, 5, 8, 9, 12, 13,
 			16, 17, 20, 21, 24, 25, 28, 29};
-		const vector unsigned char upper_half = {
+		const __vector unsigned char upper_half = {
 			2, 3, 6, 7, 10, 11, 14, 15,
 			18, 19, 22, 23, 26, 27, 30, 31};
-		const vector unsigned long shmax = {64, 64};
-		vector unsigned char byte_cnt;
-		vector unsigned short left, right;
-		vector unsigned long lshift;
-		vector __attribute__((altivec(bool__)))
+		const __vector unsigned long shmax = {64, 64};
+		__vector unsigned char byte_cnt;
+		__vector unsigned short left, right;
+		__vector unsigned long lshift;
+		__vector __attribute__((altivec(bool__)))
 			unsigned long shmask;
 #endif
-		vector unsigned char mbp1, mbp2;
-		vector unsigned char p =
-			(vector unsigned char)(vector unsigned short){
+		__vector unsigned char mbp1, mbp2;
+		__vector unsigned char p =
+			(__vector unsigned char)(__vector unsigned short){
 				0, 1, 2, 3, 0, 0, 0, 0};
 		unsigned int p1, p2, p3;
 
@@ -897,295 +897,295 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		}
 
 		/* A.0 do not cross the end of CQ. */
-		mask = (vector unsigned char)(vector unsigned long){
+		mask = (__vector unsigned char)(__vector unsigned long){
 			(pkts_n - pos) * sizeof(uint16_t) * 8, 0};
 
 		{
-			vector unsigned long lshift;
-			vector __attribute__((altivec(bool__)))
+			__vector unsigned long lshift;
+			__vector __attribute__((altivec(bool__)))
 				unsigned long shmask;
-			const vector unsigned long shmax = {64, 64};
+			const __vector unsigned long shmax = {64, 64};
 
-			lshift = vec_splat((vector unsigned long)mask, 0);
+			lshift = vec_splat((__vector unsigned long)mask, 0);
 			shmask = vec_cmpgt(shmax, lshift);
-			mask = (vector unsigned char)
-				vec_sl((vector unsigned long)ones, lshift);
-			mask = (vector unsigned char)
-				vec_sel((vector unsigned long)shmask,
-				(vector unsigned long)mask, shmask);
+			mask = (__vector unsigned char)
+				vec_sl((__vector unsigned long)ones, lshift);
+			mask = (__vector unsigned char)
+				vec_sel((__vector unsigned long)shmask,
+				(__vector unsigned long)mask, shmask);
 		}
 
-		p = (vector unsigned char)
-			vec_andc((vector unsigned long)p,
-			(vector unsigned long)mask);
+		p = (__vector unsigned char)
+			vec_andc((__vector unsigned long)p,
+			(__vector unsigned long)mask);
 
 		/* A.1 load cqes. */
-		p3 = (unsigned int)((vector unsigned short)p)[3];
-		cqes[3] = (vector unsigned char)(vector unsigned long){
+		p3 = (unsigned int)((__vector unsigned short)p)[3];
+		cqes[3] = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos + p3].sop_drop_qpn, 0LL};
 		rte_compiler_barrier();
 
-		p2 = (unsigned int)((vector unsigned short)p)[2];
-		cqes[2] = (vector unsigned char)(vector unsigned long){
+		p2 = (unsigned int)((__vector unsigned short)p)[2];
+		cqes[2] = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos + p2].sop_drop_qpn, 0LL};
 		rte_compiler_barrier();
 
 		/* B.1 load mbuf pointers. */
-		mbp1 = (vector unsigned char)vec_vsx_ld(0,
+		mbp1 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&elts[pos]);
-		mbp2 = (vector unsigned char)vec_vsx_ld(0,
+		mbp2 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&elts[pos + 2]);
 
 		/* A.1 load a block having op_own. */
-		p1 = (unsigned int)((vector unsigned short)p)[1];
-		cqes[1] = (vector unsigned char)(vector unsigned long){
+		p1 = (unsigned int)((__vector unsigned short)p)[1];
+		cqes[1] = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos + p1].sop_drop_qpn, 0LL};
 		rte_compiler_barrier();
 
-		cqes[0] = (vector unsigned char)(vector unsigned long){
+		cqes[0] = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos].sop_drop_qpn, 0LL};
 		rte_compiler_barrier();
 
 		/* B.2 copy mbuf pointers. */
-		*(vector unsigned char *)&pkts[pos] = mbp1;
-		*(vector unsigned char *)&pkts[pos + 2] = mbp2;
+		*(__vector unsigned char *)&pkts[pos] = mbp1;
+		*(__vector unsigned char *)&pkts[pos + 2] = mbp2;
 		rte_io_rmb();
 
 		/* C.1 load remaining CQE data and extract necessary fields. */
-		cqe_tmp2 = *(vector unsigned char *)
+		cqe_tmp2 = *(__vector unsigned char *)
 			&cq[pos + p3].pkt_info;
-		cqe_tmp1 = *(vector unsigned char *)
+		cqe_tmp1 = *(__vector unsigned char *)
 			&cq[pos + p2].pkt_info;
 		cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask);
 		cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask);
-		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
+		cqe_tmp2 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&cq[pos + p3].csum);
-		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
+		cqe_tmp1 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&cq[pos + p2].csum);
-		cqes[3] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[3],
-			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
-		cqes[2] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[2],
-			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
-		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
+		cqes[3] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[3],
+			(__vector unsigned short)cqe_tmp2, cqe_sel_mask1);
+		cqes[2] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[2],
+			(__vector unsigned short)cqe_tmp1, cqe_sel_mask1);
+		cqe_tmp2 = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos + p3].rsvd4[2], 0LL};
-		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
+		cqe_tmp1 = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos + p2].rsvd4[2], 0LL};
-		cqes[3] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[3],
-			(vector unsigned short)cqe_tmp2,
-			(vector unsigned short)cqe_sel_mask2);
-		cqes[2] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[2],
-			(vector unsigned short)cqe_tmp1,
-			(vector unsigned short)cqe_sel_mask2);
+		cqes[3] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[3],
+			(__vector unsigned short)cqe_tmp2,
+			(__vector unsigned short)cqe_sel_mask2);
+		cqes[2] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[2],
+			(__vector unsigned short)cqe_tmp1,
+			(__vector unsigned short)cqe_sel_mask2);
 
 		/* C.2 generate final structure for mbuf with swapping bytes. */
 		pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask);
 		pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask);
 
 		/* C.3 adjust CRC length. */
-		pkt_mb3 = (vector unsigned char)
-			((vector unsigned short)pkt_mb3 -
-			(vector unsigned short)crc_adj);
-		pkt_mb2 = (vector unsigned char)
-			((vector unsigned short)pkt_mb2 -
-			(vector unsigned short)crc_adj);
+		pkt_mb3 = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb3 -
+			(__vector unsigned short)crc_adj);
+		pkt_mb2 = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb2 -
+			(__vector unsigned short)crc_adj);
 
 		/* C.4 adjust flow mark. */
-		pkt_mb3 = (vector unsigned char)
-			((vector unsigned int)pkt_mb3 +
-			(vector unsigned int)flow_mark_adj);
-		pkt_mb2 = (vector unsigned char)
-			((vector unsigned int)pkt_mb2 +
-			(vector unsigned int)flow_mark_adj);
+		pkt_mb3 = (__vector unsigned char)
+			((__vector unsigned int)pkt_mb3 +
+			(__vector unsigned int)flow_mark_adj);
+		pkt_mb2 = (__vector unsigned char)
+			((__vector unsigned int)pkt_mb2 +
+			(__vector unsigned int)flow_mark_adj);
 
 		/* D.1 fill in mbuf - rx_descriptor_fields1. */
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&pkts[pos + 3]->pkt_len = pkt_mb3;
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&pkts[pos + 2]->pkt_len = pkt_mb2;
 
 		/* E.1 extract op_own field. */
-		op_own_tmp2 = (vector unsigned char)
-			vec_mergeh((vector unsigned int)cqes[2],
-			(vector unsigned int)cqes[3]);
+		op_own_tmp2 = (__vector unsigned char)
+			vec_mergeh((__vector unsigned int)cqes[2],
+			(__vector unsigned int)cqes[3]);
 
 		/* C.1 load remaining CQE data and extract necessary fields. */
-		cqe_tmp2 = *(vector unsigned char *)
+		cqe_tmp2 = *(__vector unsigned char *)
 			&cq[pos + p1].pkt_info;
-		cqe_tmp1 = *(vector unsigned char *)
+		cqe_tmp1 = *(__vector unsigned char *)
 			&cq[pos].pkt_info;
 		cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask);
 		cqes[0] = vec_sel(cqes[0], cqe_tmp2, blend_mask);
-		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
+		cqe_tmp2 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&cq[pos + p1].csum);
-		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
+		cqe_tmp1 = (__vector unsigned char)vec_vsx_ld(0,
 			(signed int const *)&cq[pos].csum);
-		cqes[1] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[1],
-			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
-		cqes[0] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[0],
-			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
-		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
+		cqes[1] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[1],
+			(__vector unsigned short)cqe_tmp2, cqe_sel_mask1);
+		cqes[0] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[0],
+			(__vector unsigned short)cqe_tmp1, cqe_sel_mask1);
+		cqe_tmp2 = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos + p1].rsvd4[2], 0LL};
-		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
+		cqe_tmp1 = (__vector unsigned char)(__vector unsigned long){
 			*(__rte_aligned(8) unsigned long *)
 			&cq[pos].rsvd4[2], 0LL};
-		cqes[1] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[1],
-			(vector unsigned short)cqe_tmp2, cqe_sel_mask2);
-		cqes[0] = (vector unsigned char)
-			vec_sel((vector unsigned short)cqes[0],
-			(vector unsigned short)cqe_tmp1, cqe_sel_mask2);
+		cqes[1] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[1],
+			(__vector unsigned short)cqe_tmp2, cqe_sel_mask2);
+		cqes[0] = (__vector unsigned char)
+			vec_sel((__vector unsigned short)cqes[0],
+			(__vector unsigned short)cqe_tmp1, cqe_sel_mask2);
 
 		/* C.2 generate final structure for mbuf with swapping bytes. */
 		pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask);
 		pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask);
 
 		/* C.3 adjust CRC length. */
-		pkt_mb1 = (vector unsigned char)
-			((vector unsigned short)pkt_mb1 -
-			(vector unsigned short)crc_adj);
-		pkt_mb0 = (vector unsigned char)
-			((vector unsigned short)pkt_mb0 -
-			(vector unsigned short)crc_adj);
+		pkt_mb1 = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb1 -
+			(__vector unsigned short)crc_adj);
+		pkt_mb0 = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb0 -
+			(__vector unsigned short)crc_adj);
 
 		/* C.4 adjust flow mark. */
-		pkt_mb1 = (vector unsigned char)
-			((vector unsigned int)pkt_mb1 +
-			(vector unsigned int)flow_mark_adj);
-		pkt_mb0 = (vector unsigned char)
-			((vector unsigned int)pkt_mb0 +
-			(vector unsigned int)flow_mark_adj);
+		pkt_mb1 = (__vector unsigned char)
+			((__vector unsigned int)pkt_mb1 +
+			(__vector unsigned int)flow_mark_adj);
+		pkt_mb0 = (__vector unsigned char)
+			((__vector unsigned int)pkt_mb0 +
+			(__vector unsigned int)flow_mark_adj);
 
 		/* E.1 extract op_own byte. */
-		op_own_tmp1 = (vector unsigned char)
-			vec_mergeh((vector unsigned int)cqes[0],
-			(vector unsigned int)cqes[1]);
-		op_own = (vector unsigned char)
-			vec_mergel((vector unsigned long)op_own_tmp1,
-			(vector unsigned long)op_own_tmp2);
+		op_own_tmp1 = (__vector unsigned char)
+			vec_mergeh((__vector unsigned int)cqes[0],
+			(__vector unsigned int)cqes[1]);
+		op_own = (__vector unsigned char)
+			vec_mergel((__vector unsigned long)op_own_tmp1,
+			(__vector unsigned long)op_own_tmp2);
 
 		/* D.1 fill in mbuf - rx_descriptor_fields1. */
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&pkts[pos + 1]->pkt_len = pkt_mb1;
-		*(vector unsigned char *)
+		*(__vector unsigned char *)
 			&pkts[pos]->pkt_len = pkt_mb0;
 
 		/* E.2 flip owner bit to mark CQEs from last round. */
-		owner_mask = (vector unsigned char)
-			vec_and((vector unsigned long)op_own,
-			(vector unsigned long)owner_check);
+		owner_mask = (__vector unsigned char)
+			vec_and((__vector unsigned long)op_own,
+			(__vector unsigned long)owner_check);
 		if (ownership)
-			owner_mask = (vector unsigned char)
-				vec_xor((vector unsigned long)owner_mask,
-				(vector unsigned long)owner_check);
-		owner_mask = (vector unsigned char)
-			vec_cmpeq((vector unsigned int)owner_mask,
-			(vector unsigned int)owner_check);
-		owner_mask = (vector unsigned char)
-			vec_packs((vector unsigned int)owner_mask,
-			(vector unsigned int)zero);
+			owner_mask = (__vector unsigned char)
+				vec_xor((__vector unsigned long)owner_mask,
+				(__vector unsigned long)owner_check);
+		owner_mask = (__vector unsigned char)
+			vec_cmpeq((__vector unsigned int)owner_mask,
+			(__vector unsigned int)owner_check);
+		owner_mask = (__vector unsigned char)
+			vec_packs((__vector unsigned int)owner_mask,
+			(__vector unsigned int)zero);
 
 		/* E.3 get mask for invalidated CQEs. */
-		opcode = (vector unsigned char)
-			vec_and((vector unsigned long)op_own,
-			(vector unsigned long)opcode_check);
-		invalid_mask = (vector unsigned char)
-			vec_cmpeq((vector unsigned int)opcode_check,
-			(vector unsigned int)opcode);
-		invalid_mask = (vector unsigned char)
-			vec_packs((vector unsigned int)invalid_mask,
-			(vector unsigned int)zero);
+		opcode = (__vector unsigned char)
+			vec_and((__vector unsigned long)op_own,
+			(__vector unsigned long)opcode_check);
+		invalid_mask = (__vector unsigned char)
+			vec_cmpeq((__vector unsigned int)opcode_check,
+			(__vector unsigned int)opcode);
+		invalid_mask = (__vector unsigned char)
+			vec_packs((__vector unsigned int)invalid_mask,
+			(__vector unsigned int)zero);
 
 		/* E.4 mask out beyond boundary. */
-		invalid_mask = (vector unsigned char)
-			vec_or((vector unsigned long)invalid_mask,
-			(vector unsigned long)mask);
+		invalid_mask = (__vector unsigned char)
+			vec_or((__vector unsigned long)invalid_mask,
+			(__vector unsigned long)mask);
 
 		/* E.5 merge invalid_mask with invalid owner. */
-		invalid_mask = (vector unsigned char)
-			vec_or((vector unsigned long)invalid_mask,
-			(vector unsigned long)owner_mask);
+		invalid_mask = (__vector unsigned char)
+			vec_or((__vector unsigned long)invalid_mask,
+			(__vector unsigned long)owner_mask);
 
 		/* F.1 find compressed CQE format. */
-		comp_mask = (vector unsigned char)
-			vec_and((vector unsigned long)op_own,
-			(vector unsigned long)format_check);
-		comp_mask = (vector unsigned char)
-			vec_cmpeq((vector unsigned int)comp_mask,
-			(vector unsigned int)format_check);
-		comp_mask = (vector unsigned char)
-			vec_packs((vector unsigned int)comp_mask,
-			(vector unsigned int)zero);
+		comp_mask = (__vector unsigned char)
+			vec_and((__vector unsigned long)op_own,
+			(__vector unsigned long)format_check);
+		comp_mask = (__vector unsigned char)
+			vec_cmpeq((__vector unsigned int)comp_mask,
+			(__vector unsigned int)format_check);
+		comp_mask = (__vector unsigned char)
+			vec_packs((__vector unsigned int)comp_mask,
+			(__vector unsigned int)zero);
 
 		/* F.2 mask out invalid entries. */
-		comp_mask = (vector unsigned char)
-			vec_andc((vector unsigned long)comp_mask,
-			(vector unsigned long)invalid_mask);
-		comp_idx = ((vector unsigned long)comp_mask)[0];
+		comp_mask = (__vector unsigned char)
+			vec_andc((__vector unsigned long)comp_mask,
+			(__vector unsigned long)invalid_mask);
+		comp_idx = ((__vector unsigned long)comp_mask)[0];
 
 		/* F.3 get the first compressed CQE. */
 		comp_idx = comp_idx ? __builtin_ctzll(comp_idx) /
 			(sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP;
 
 		/* E.6 mask out entries after the compressed CQE. */
-		mask = (vector unsigned char)(vector unsigned long){
+		mask = (__vector unsigned char)(__vector unsigned long){
 			(comp_idx * sizeof(uint16_t) * 8), 0};
-		lshift = vec_splat((vector unsigned long)mask, 0);
+		lshift = vec_splat((__vector unsigned long)mask, 0);
 		shmask = vec_cmpgt(shmax, lshift);
-		mask = (vector unsigned char)
-			vec_sl((vector unsigned long)ones, lshift);
-		mask = (vector unsigned char)
-			vec_sel((vector unsigned long)shmask,
-			(vector unsigned long)mask, shmask);
-		invalid_mask = (vector unsigned char)
-			vec_or((vector unsigned long)invalid_mask,
-			(vector unsigned long)mask);
+		mask = (__vector unsigned char)
+			vec_sl((__vector unsigned long)ones, lshift);
+		mask = (__vector unsigned char)
+			vec_sel((__vector unsigned long)shmask,
+			(__vector unsigned long)mask, shmask);
+		invalid_mask = (__vector unsigned char)
+			vec_or((__vector unsigned long)invalid_mask,
+			(__vector unsigned long)mask);
 
 		/* E.7 count non-compressed valid CQEs. */
-		n = ((vector unsigned long)invalid_mask)[0];
+		n = ((__vector unsigned long)invalid_mask)[0];
 		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
 			MLX5_VPMD_DESCS_PER_LOOP;
 		nocmp_n += n;
 
 		/* D.2 get the final invalid mask. */
-		mask = (vector unsigned char)(vector unsigned long){
+		mask = (__vector unsigned char)(__vector unsigned long){
 			(n * sizeof(uint16_t) * 8), 0};
-		lshift = vec_splat((vector unsigned long)mask, 0);
+		lshift = vec_splat((__vector unsigned long)mask, 0);
 		shmask = vec_cmpgt(shmax, lshift);
-		mask = (vector unsigned char)
-			vec_sl((vector unsigned long)ones, lshift);
-		mask = (vector unsigned char)
-			vec_sel((vector unsigned long)shmask,
-			(vector unsigned long)mask, shmask);
-		invalid_mask = (vector unsigned char)
-			vec_or((vector unsigned long)invalid_mask,
-			(vector unsigned long)mask);
+		mask = (__vector unsigned char)
+			vec_sl((__vector unsigned long)ones, lshift);
+		mask = (__vector unsigned char)
+			vec_sel((__vector unsigned long)shmask,
+			(__vector unsigned long)mask, shmask);
+		invalid_mask = (__vector unsigned char)
+			vec_or((__vector unsigned long)invalid_mask,
+			(__vector unsigned long)mask);
 
 		/* D.3 check error in opcode. */
-		opcode = (vector unsigned char)
-			vec_cmpeq((vector unsigned int)resp_err_check,
-			(vector unsigned int)opcode);
-		opcode = (vector unsigned char)
-			vec_packs((vector unsigned int)opcode,
-			(vector unsigned int)zero);
-		opcode = (vector unsigned char)
-			vec_andc((vector unsigned long)opcode,
-			(vector unsigned long)invalid_mask);
+		opcode = (__vector unsigned char)
+			vec_cmpeq((__vector unsigned int)resp_err_check,
+			(__vector unsigned int)opcode);
+		opcode = (__vector unsigned char)
+			vec_packs((__vector unsigned int)opcode,
+			(__vector unsigned int)zero);
+		opcode = (__vector unsigned char)
+			vec_andc((__vector unsigned long)opcode,
+			(__vector unsigned long)invalid_mask);
 
 		/* D.4 mark if any error is set */
-		*err |= ((vector unsigned long)opcode)[0];
+		*err |= ((__vector unsigned long)opcode)[0];
 
 		/* D.5 fill in mbuf - rearm_data and packet_type. */
 		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
@@ -1255,20 +1255,20 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Add up received bytes count. */
 		byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
-		byte_cnt = (vector unsigned char)
-			vec_andc((vector unsigned long)byte_cnt,
-			(vector unsigned long)invalid_mask);
-		left = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, lower_half);
-		right = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, upper_half);
-		byte_cnt = (vector unsigned char)vec_add(left, right);
-		left = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, lower_half);
-		right = vec_perm((vector unsigned short)byte_cnt,
-			(vector unsigned short)zero, upper_half);
-		byte_cnt = (vector unsigned char)vec_add(left, right);
-		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
+		byte_cnt = (__vector unsigned char)
+			vec_andc((__vector unsigned long)byte_cnt,
+			(__vector unsigned long)invalid_mask);
+		left = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, lower_half);
+		right = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, upper_half);
+		byte_cnt = (__vector unsigned char)vec_add(left, right);
+		left = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, lower_half);
+		right = vec_perm((__vector unsigned short)byte_cnt,
+			(__vector unsigned short)zero, upper_half);
+		byte_cnt = (__vector unsigned char)vec_add(left, right);
+		rcvd_byte += ((__vector unsigned long)byte_cnt)[0];
 #endif
 
 		/*
diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
index 7534974ef4..e7f0ed6068 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
@@ -50,9 +50,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct rte_mbuf **sw_ring_end;
 	struct rte_mbuf **ref_rx_pkts;
 	uint16_t nb_pkts_received = 0;
-	const vector unsigned char zero = {0};
+	const __vector unsigned char zero = {0};
 
-	const vector unsigned char shuf_msk1 = {
+	const __vector unsigned char shuf_msk1 = {
 		0xFF, 0xFF, 0xFF, 0xFF,	/* packet type */
 		4, 5, 0xFF, 0xFF, /* vlan tci */
 		4, 5,			/* dat len */
@@ -60,7 +60,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		0xFF, 0xFF, 0xFF, 0xFF
 	};
 
-	const vector unsigned char shuf_msk2 = {
+	const __vector unsigned char shuf_msk2 = {
 		0xFF, 0xFF, 0xFF, 0xFF,	/* packet type */
 		12, 13, 0xFF, 0xFF,	/* pkt len */
 		12, 13,			/* dat len */
@@ -72,7 +72,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	 * Subtract the header length.
 	 *  In which case do we need the header length in used->len ?
 	 */
-	const vector unsigned short len_adjust = {
+	const __vector unsigned short len_adjust = {
 		0, 0,
 		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
 		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
@@ -112,68 +112,68 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	ref_rx_pkts = rx_pkts;
 	for (nb_pkts_received = 0;
 		nb_pkts_received < nb_total;) {
-		vector unsigned char desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
-		vector unsigned char mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
-		vector unsigned char pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+		__vector unsigned char desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+		__vector unsigned char mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+		__vector unsigned char pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
 
 		mbp[0] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 0));
 		desc[0] = vec_vsx_ld(0, (unsigned char const *)(rused + 0));
-		*(vector unsigned char *)&rx_pkts[0] = mbp[0];
+		*(__vector unsigned char *)&rx_pkts[0] = mbp[0];
 
 		mbp[1] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 2));
 		desc[1] = vec_vsx_ld(0, (unsigned char const *)(rused + 2));
-		*(vector unsigned char *)&rx_pkts[2] = mbp[1];
+		*(__vector unsigned char *)&rx_pkts[2] = mbp[1];
 
 		mbp[2] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 4));
 		desc[2] = vec_vsx_ld(0, (unsigned char const *)(rused + 4));
-		*(vector unsigned char *)&rx_pkts[4] = mbp[2];
+		*(__vector unsigned char *)&rx_pkts[4] = mbp[2];
 
 		mbp[3] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 6));
 		desc[3] = vec_vsx_ld(0, (unsigned char const *)(rused + 6));
-		*(vector unsigned char *)&rx_pkts[6] = mbp[3];
+		*(__vector unsigned char *)&rx_pkts[6] = mbp[3];
 
 		pkt_mb[0] = vec_perm(desc[0], zero, shuf_msk1);
 		pkt_mb[1] = vec_perm(desc[0], zero, shuf_msk2);
-		pkt_mb[0] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[0] + len_adjust);
-		pkt_mb[1] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[1] + len_adjust);
-		*(vector unsigned char *)&rx_pkts[0]->rx_descriptor_fields1 =
+		pkt_mb[0] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[0] + len_adjust);
+		pkt_mb[1] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[1] + len_adjust);
+		*(__vector unsigned char *)&rx_pkts[0]->rx_descriptor_fields1 =
 			pkt_mb[0];
-		*(vector unsigned char *)&rx_pkts[1]->rx_descriptor_fields1 =
+		*(__vector unsigned char *)&rx_pkts[1]->rx_descriptor_fields1 =
 			pkt_mb[1];
 
 		pkt_mb[2] = vec_perm(desc[1], zero, shuf_msk1);
 		pkt_mb[3] = vec_perm(desc[1], zero, shuf_msk2);
-		pkt_mb[2] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[2] + len_adjust);
-		pkt_mb[3] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[3] + len_adjust);
-		*(vector unsigned char *)&rx_pkts[2]->rx_descriptor_fields1 =
+		pkt_mb[2] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[2] + len_adjust);
+		pkt_mb[3] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[3] + len_adjust);
+		*(__vector unsigned char *)&rx_pkts[2]->rx_descriptor_fields1 =
 			pkt_mb[2];
-		*(vector unsigned char *)&rx_pkts[3]->rx_descriptor_fields1 =
+		*(__vector unsigned char *)&rx_pkts[3]->rx_descriptor_fields1 =
 			pkt_mb[3];
 
 		pkt_mb[4] = vec_perm(desc[2], zero, shuf_msk1);
 		pkt_mb[5] = vec_perm(desc[2], zero, shuf_msk2);
-		pkt_mb[4] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[4] + len_adjust);
-		pkt_mb[5] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[5] + len_adjust);
-		*(vector unsigned char *)&rx_pkts[4]->rx_descriptor_fields1 =
+		pkt_mb[4] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[4] + len_adjust);
+		pkt_mb[5] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[5] + len_adjust);
+		*(__vector unsigned char *)&rx_pkts[4]->rx_descriptor_fields1 =
 			pkt_mb[4];
-		*(vector unsigned char *)&rx_pkts[5]->rx_descriptor_fields1 =
+		*(__vector unsigned char *)&rx_pkts[5]->rx_descriptor_fields1 =
 			pkt_mb[5];
 
 		pkt_mb[6] = vec_perm(desc[3], zero, shuf_msk1);
 		pkt_mb[7] = vec_perm(desc[3], zero, shuf_msk2);
-		pkt_mb[6] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[6] + len_adjust);
-		pkt_mb[7] = (vector unsigned char)
-			((vector unsigned short)pkt_mb[7] + len_adjust);
-		*(vector unsigned char *)&rx_pkts[6]->rx_descriptor_fields1 =
+		pkt_mb[6] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[6] + len_adjust);
+		pkt_mb[7] = (__vector unsigned char)
+			((__vector unsigned short)pkt_mb[7] + len_adjust);
+		*(__vector unsigned char *)&rx_pkts[6]->rx_descriptor_fields1 =
 			pkt_mb[6];
-		*(vector unsigned char *)&rx_pkts[7]->rx_descriptor_fields1 =
+		*(__vector unsigned char *)&rx_pkts[7]->rx_descriptor_fields1 =
 			pkt_mb[7];
 
 		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h
index 7c6814252a..0c6852a7bb 100644
--- a/examples/l3fwd/l3fwd_lpm_altivec.h
+++ b/examples/l3fwd/l3fwd_lpm_altivec.h
@@ -14,7 +14,7 @@
  */
 static inline void
 processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
-		vector unsigned int *dip,
+		__vector unsigned int *dip,
 		uint32_t *ipv4_flag)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
@@ -45,7 +45,7 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
 	ipv4_flag[0] &= pkt[3]->packet_type;
 
 	rte_compiler_barrier();
-	dip[0] = (vector unsigned int){x0, x1, x2, x3};
+	dip[0] = (__vector unsigned int){x0, x1, x2, x3};
 }
 
 /*
@@ -54,22 +54,22 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
  */
 static inline void
 processx4_step2(const struct lcore_conf *qconf,
-		vector unsigned int dip,
+		__vector unsigned int dip,
 		uint32_t ipv4_flag,
 		uint8_t portid,
 		struct rte_mbuf *pkt[FWDSTEP],
 		uint16_t dprt[FWDSTEP])
 {
 	rte_xmm_t dst;
-	const vector unsigned char bswap_mask = (vector unsigned char){
+	const __vector unsigned char bswap_mask = (__vector unsigned char){
 							3, 2, 1, 0,
 							7, 6, 5, 4,
 							11, 10, 9, 8,
 							15, 14, 13, 12};
 
 	/* Byte swap 4 IPV4 addresses. */
-	dip = (vector unsigned int)vec_perm(*(vector unsigned char *)&dip,
-					(vector unsigned char){}, bswap_mask);
+	dip = (__vector unsigned int)vec_perm(*(__vector unsigned char *)&dip,
+					(__vector unsigned char){}, bswap_mask);
 
 	/* if all 4 packets are IPV4. */
 	if (likely(ipv4_flag)) {
@@ -101,7 +101,7 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 {
 	int32_t j;
 	uint16_t dst_port[MAX_PKT_BURST];
-	vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
+	__vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
 	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
 	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
 
diff --git a/lib/eal/ppc/include/rte_vect.h b/lib/eal/ppc/include/rte_vect.h
index c1f0b0672c..a5f009b7df 100644
--- a/lib/eal/ppc/include/rte_vect.h
+++ b/lib/eal/ppc/include/rte_vect.h
@@ -17,7 +17,7 @@ extern "C" {
 
 #define RTE_VECT_DEFAULT_SIMD_BITWIDTH RTE_VECT_SIMD_256
 
-typedef vector signed int xmm_t;
+typedef __vector signed int xmm_t;
 
 #define	XMM_SIZE	(sizeof(xmm_t))
 #define	XMM_MASK	(XMM_SIZE - 1)
diff --git a/lib/lpm/rte_lpm_altivec.h b/lib/lpm/rte_lpm_altivec.h
index 4fbc1b595d..bab8929495 100644
--- a/lib/lpm/rte_lpm_altivec.h
+++ b/lib/lpm/rte_lpm_altivec.h
@@ -19,14 +19,14 @@ static inline void
 rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	uint32_t defv)
 {
-	vector signed int i24;
+	xmm_t i24;
 	rte_xmm_t i8;
 	uint32_t tbl[4];
 	uint64_t idx, pt, pt2;
 	const uint32_t *ptbl;
 
 	const uint32_t mask = UINT8_MAX;
-	const vector signed int mask8 = (xmm_t){mask, mask, mask, mask};
+	const xmm_t mask8 = (xmm_t){mask, mask, mask, mask};
 
 	/*
 	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
@@ -46,7 +46,7 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 
 	/* get 4 indexes for tbl24[]. */
 	i24 = vec_sr((xmm_t) ip,
-		(vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
+		(__vector unsigned int){CHAR_BIT, CHAR_BIT, CHAR_BIT, CHAR_BIT});
 
 	/* extract values from tbl24[] */
 	idx = (uint32_t)i24[0];
-- 
2.35.3



* Re: [PATCH] avoid AltiVec keyword vector
  2022-05-03 12:03 [PATCH] avoid AltiVec keyword vector Thomas Monjalon
@ 2022-05-03 12:30 ` Bruce Richardson
  2022-05-03 15:44   ` Thomas Monjalon
  2022-05-04 20:40 ` David Christensen
  1 sibling, 1 reply; 6+ messages in thread
From: Bruce Richardson @ 2022-05-03 12:30 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, David Christensen, Beilei Xing, Matan Azrad,
	Viacheslav Ovsiienko, Maxime Coquelin, Chenbo Xia,
	Vladimir Medvedkin

On Tue, May 03, 2022 at 02:03:21PM +0200, Thomas Monjalon wrote:
> The keyword "vector" may conflict easily.
> As a rule, it is better to use the alternative keyword "__vector".
> 

hi Thomas,

could you perhaps clarify a bit more in the log? I'm not aware of it being
a keyword generally. What would "vector" conflict with? If it's a keyword,
is it possible to use it as a variable name in these files?

Thanks,
/Bruce


* Re: [PATCH] avoid AltiVec keyword vector
  2022-05-03 12:30 ` Bruce Richardson
@ 2022-05-03 15:44   ` Thomas Monjalon
  2022-05-04  6:30     ` Tyler Retzlaff
  0 siblings, 1 reply; 6+ messages in thread
From: Thomas Monjalon @ 2022-05-03 15:44 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: dev, David Christensen, Beilei Xing, Matan Azrad,
	Viacheslav Ovsiienko, Maxime Coquelin, Chenbo Xia,
	Vladimir Medvedkin

03/05/2022 14:30, Bruce Richardson:
> On Tue, May 03, 2022 at 02:03:21PM +0200, Thomas Monjalon wrote:
> > The keyword "vector" may conflict easily.
> > As a rule, it is better to use the alternative keyword "__vector".

I forgot to say that we should #undef vector.

> hi Thomas,
> 
> could you perhaps clarify a bit more in the log? I'm not aware of it being
> a keyword generally.

https://www.ibm.com/docs/fr/xl-c-and-cpp-linux/latest?topic=support-vector-types

> What would "vector" conflict with?
> If it's a keyword, is it possible to use it as a variable name in these files?

It conflicts with anything named "vector"
if you include the AltiVec header file.
It is especially a problem when using the C++ vector type;
that's why the keyword vector is not defined for C++:
https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/altivec.h#L45

The workaround is to #undef vector after including altivec.h,
or directly in rte_altivec.h (with a compatibility breakage).
In any case we should use only the __vector keyword to allow such an #undef.
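
For illustration, here is a minimal sketch of the clash (a hypothetical
file, not code from the tree; it assumes a POWER toolchain with AltiVec
enabled):

	#include <altivec.h>  /* defines "vector" as an alias of "__vector" */

	/*
	 * With that alias in place, this does not compile:
	 *     int vector;    // expands to "int __vector;" -> syntax error
	 * and C++ code using std::vector breaks the same way.
	 */

	/* Once only the __vector spelling is used, the alias can be dropped: */
	#undef vector

	int vector;                                  /* plain identifier again */
	__vector unsigned int lanes = {0, 1, 2, 3};  /* AltiVec type still usable */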




* Re: [PATCH] avoid AltiVec keyword vector
  2022-05-03 15:44   ` Thomas Monjalon
@ 2022-05-04  6:30     ` Tyler Retzlaff
  0 siblings, 0 replies; 6+ messages in thread
From: Tyler Retzlaff @ 2022-05-04  6:30 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Bruce Richardson, dev, David Christensen, Beilei Xing,
	Matan Azrad, Viacheslav Ovsiienko, Maxime Coquelin, Chenbo Xia,
	Vladimir Medvedkin

On Tue, May 03, 2022 at 05:44:26PM +0200, Thomas Monjalon wrote:
> 03/05/2022 14:30, Bruce Richardson:
> > On Tue, May 03, 2022 at 02:03:21PM +0200, Thomas Monjalon wrote:
> > > The keyword "vector" may conflict easily.
> > > As a rule, it is better to use the alternative keyword "__vector".
> 
> I forgot to say that we should #undef vector.
> 
> > hi Thomas,
> > 
> > could you perhaps clarify a bit more in the log, I'm not aware of it being
> > a keyword generally.
> 
> https://www.ibm.com/docs/fr/xl-c-and-cpp-linux/latest?topic=support-vector-types
> 
> > What would "vector" conflict with?
> > If it's a keyword is it possible to use it as a variable name in these files?
> 
> It conflicts with anything named "vector"
> if you include the AltiVec header file.
> It is especially a problem when using the C++ vector type;
> that's why the keyword vector is not defined for C++:
> https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/altivec.h#L45
> 
> The workaround is to #undef vector after including altivec.h,
> or directly in rte_altivec.h (with a compatibility breakage).
> In any case we should use only the __vector keyword to allow such an #undef.
> 

+1, please use __vector; it is appropriate and in the namespace defined for
the implementation. We should make as much effort as possible to avoid
contamination of the application namespace.


* Re: [PATCH] avoid AltiVec keyword vector
  2022-05-03 12:03 [PATCH] avoid AltiVec keyword vector Thomas Monjalon
  2022-05-03 12:30 ` Bruce Richardson
@ 2022-05-04 20:40 ` David Christensen
  2022-05-25  8:18   ` Thomas Monjalon
  1 sibling, 1 reply; 6+ messages in thread
From: David Christensen @ 2022-05-04 20:40 UTC (permalink / raw)
  To: Thomas Monjalon, dev
  Cc: Beilei Xing, Matan Azrad, Viacheslav Ovsiienko, Maxime Coquelin,
	Chenbo Xia, Bruce Richardson, Vladimir Medvedkin

On 5/3/22 5:03 AM, Thomas Monjalon wrote:
> The keyword "vector" may conflict easily.
> As a rule, it is better to use the alternative keyword "__vector".
> 
> Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> ---
>   drivers/net/i40e/i40e_rxtx_vec_altivec.c      |  214 ++--
>   drivers/net/mlx5/mlx5_rxtx_vec_altivec.h      | 1078 ++++++++---------
>   .../net/virtio/virtio_rxtx_simple_altivec.c   |   70 +-
>   examples/l3fwd/l3fwd_lpm_altivec.h            |   14 +-
>   lib/eal/ppc/include/rte_vect.h                |    2 +-
>   lib/lpm/rte_lpm_altivec.h                     |    6 +-
>   6 files changed, 692 insertions(+), 692 deletions(-)

Reviewed-by: David Christensen <drc@linux.vnet.ibm.com>


* Re: [PATCH] avoid AltiVec keyword vector
  2022-05-04 20:40 ` David Christensen
@ 2022-05-25  8:18   ` Thomas Monjalon
  0 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2022-05-25  8:18 UTC (permalink / raw)
  To: Bruce Richardson, David Christensen
  Cc: dev, Beilei Xing, Matan Azrad, Viacheslav Ovsiienko,
	Maxime Coquelin, Chenbo Xia, Vladimir Medvedkin

04/05/2022 22:40, David Christensen:
> On 5/3/22 5:03 AM, Thomas Monjalon wrote:
> > The keyword "vector" may conflict easily.
> > As a rule, it is better to use the alternative keyword "__vector".
> > 
> > Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> 
> Reviewed-by: David Christensen <drc@linux.vnet.ibm.com>

Applied with a longer explanation:

    The AltiVec header file defines "vector", except in C++ builds.
    The keyword "vector" may conflict easily.
    As a rule, it is better to use the alternative keyword "__vector",
    so we will be able to #undef vector after including the AltiVec header.
    
    Later it may become possible to #undef vector in rte_altivec.h
    with a compatibility breakage.
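
For reference, a sketch of what that later rte_altivec.h change could look
like (an assumption about a possible follow-up, not part of this patch):

	/* rte_altivec.h (sketch of a possible future version) */
	#include <altivec.h>

	/*
	 * The DPDK tree now spells the AltiVec type keyword "__vector" only,
	 * so the compatibility macro from altivec.h can be dropped here.
	 * Applications still relying on the bare "vector" keyword would have
	 * to adapt, hence the compatibility breakage mentioned above.
	 */
	#undef vector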




