From: "Su, Simei" <simei.su@intel.com>
To: "Guo, Jia" <jia.guo@intel.com>,
	"Zhang, Qi Z" <qi.z.zhang@intel.com>,
	"Xing, Beilei" <beilei.xing@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH] net/ice: fix GTPU down/uplink and extension conflict
Date: Fri, 24 Jul 2020 14:19:57 +0000
Message-ID: <BL0PR11MB3380633715AAB7125E9E785C9C770@BL0PR11MB3380.namprd11.prod.outlook.com> (raw)
In-Reply-To: <b2780831-b2a5-293a-7860-911bb7fe33e5@intel.com>

Hi, Guojia

> -----Original Message-----
> From: Guo, Jia <jia.guo@intel.com>
> Sent: Friday, July 24, 2020 3:13 PM
> To: Su, Simei <simei.su@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH] net/ice: fix GTPU down/uplink and extension conflict
> 
> Hi, Simei
> 
> On 7/24/2020 10:10 AM, Simei Su wrote:
> > When adding an RSS rule with GTPU_DWN/UP, the profile table is searched
> > from the top index. If an RSS rule with GTPU_EH already exists, then a
> > GTPU_DWN/UP packet will match the GTPU_EH profile. This patch fixes the
> > issue by removing the existing GTPU_EH rule before creating a new
> > GTPU_DWN/UP rule.
> 
> 
> Suggest explaining the relationship between GTPU_EH_UPLINK/DWNLINK and
> GTPU_EH to help readers understand the reason for the change.

 Ok, I will refine the commit message.
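
 For reference, the conflict can be reproduced with a testpmd sequence along
 these lines (illustrative only; the exact flow syntax here is my own
 reconstruction and may differ):

   # 1. RSS rule for GTPU_EH ipv4 (gtp_psc without pdu_t)
   flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc / ipv4 / end actions rss types ipv4 end key_len 0 queues end / end

   # 2. RSS rule for GTPU downlink (pdu_t is 0); before this patch, downlink
   #    packets keep matching the GTPU_EH profile created in step 1
   flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc pdu_t is 0 / ipv4 / end actions rss types ipv4 end key_len 0 queues end / end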

> 
> 
> > Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS")
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> >   drivers/net/ice/ice_ethdev.c |  47 +++++++++++++++
> >   drivers/net/ice/ice_ethdev.h |  15 +++++
> >   drivers/net/ice/ice_hash.c   | 139
> +++++++++++++++++++++++++++++++++++++++++++
> >   3 files changed, 201 insertions(+)
> >
> > diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> > index a4a0390..8839146 100644
> > --- a/drivers/net/ice/ice_ethdev.c
> > +++ b/drivers/net/ice/ice_ethdev.c
> > @@ -2538,6 +2538,11 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   		if (ret)
> >   			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
> >   				    __func__, ret);
> 
> 
> A blank line is needed here.

 Ok.

> 
> 
> > +		/* Store hash field and header for gtpu_eh ipv4 */
> > +		pf->gtpu_eh.ipv4.hash_fld = ICE_FLOW_HASH_IPV4;
> > +		pf->gtpu_eh.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH |
> > +					   ICE_FLOW_SEG_HDR_IPV4 |
> > +					   ICE_FLOW_SEG_HDR_IPV_OTHER;
> >
> >   		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
> >   				ICE_FLOW_SEG_HDR_PPPOE |
> > @@ -2564,6 +2569,11 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   		if (ret)
> >   			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
> >   				    __func__, ret);
> > +		/* Store hash field and header for gtpu_eh ipv6 */
> > +		pf->gtpu_eh.ipv6.hash_fld = ICE_FLOW_HASH_IPV6;
> > +		pf->gtpu_eh.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH |
> > +					   ICE_FLOW_SEG_HDR_IPV6 |
> > +					   ICE_FLOW_SEG_HDR_IPV_OTHER;
> >
> >   		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
> >   				ICE_FLOW_SEG_HDR_PPPOE |
> > @@ -2586,6 +2596,9 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   		if (ret)
> >   			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
> >   				    __func__, ret);
> > +		/* Store hash field and header for gtpu_eh ipv4_udp */
> > +		pf->gtpu_eh.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4;
> > +		pf->gtpu_eh.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
> >
> >   		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
> >   				ICE_FLOW_SEG_HDR_PPPOE, 0);
> > @@ -2606,6 +2619,9 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   		if (ret)
> >   			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
> >   				    __func__, ret);
> > +		/* Store hash field and header for gtpu_eh ipv6_udp */
> > +		pf->gtpu_eh.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6;
> > +		pf->gtpu_eh.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
> >
> >   		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
> >   				ICE_FLOW_SEG_HDR_PPPOE, 0);
> > @@ -2626,6 +2642,9 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   		if (ret)
> >   			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
> >   				    __func__, ret);
> > +		/* Store hash field and header for gtpu_eh ipv4_tcp */
> > +		pf->gtpu_eh.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4;
> > +		pf->gtpu_eh.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
> >
> >   		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
> >   				ICE_FLOW_SEG_HDR_PPPOE, 0);
> > @@ -2646,6 +2665,9 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   		if (ret)
> >   			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
> >   				    __func__, ret);
> > +		/* Store hash field and header for gtpu_eh ipv6_tcp */
> > +		pf->gtpu_eh.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6;
> > +		pf->gtpu_eh.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH;
> >
> >   		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
> >   				ICE_FLOW_SEG_HDR_PPPOE, 0);
> > @@ -2695,6 +2717,28 @@ static int ice_parse_devargs(struct rte_eth_dev
> *dev)
> >   	}
> >   }
> >
> > +static void
> > +ice_rss_ctx_init(struct ice_pf *pf)
> > +{
> > +	pf->gtpu_eh.ipv4.hash_fld = 0;
> > +	pf->gtpu_eh.ipv4.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv6.hash_fld = 0;
> > +	pf->gtpu_eh.ipv6.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv4_udp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv4_udp.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv6_udp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv6_udp.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv4_tcp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv6_tcp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0;
> > +}
> > +
> >   static int ice_init_rss(struct ice_pf *pf)
> >   {
> >   	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > @@ -2755,6 +2799,9 @@ static int ice_init_rss(struct ice_pf *pf)
> >   		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
> >   	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
> >
> > +	/* Initialize RSS context for gtpu_eh */
> > +	ice_rss_ctx_init(pf);
> > +
> >   	/* RSS hash configuration */
> >   	ice_rss_hash_set(pf, rss_conf->rss_hf);
> >
> > diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> > index 87984ef..1baf0b4 100644
> > --- a/drivers/net/ice/ice_ethdev.h
> > +++ b/drivers/net/ice/ice_ethdev.h
> > @@ -358,6 +358,20 @@ struct ice_fdir_info {
> >   	struct ice_fdir_counter_pool_container counter;
> >   };
> >
> > +struct ice_gtpu_eh {
> > +	uint32_t pkt_hdr;
> > +	uint64_t hash_fld;
> > +};
> > +
> 
> 
> The name "ice_gtpu_eh" is not clear; how about ice_hash_gtpu_eh_ctx?
> 
 Ok, I will rename it to make it clearer.

> 
> > +struct ice_hash_gtpu_eh {
> > +	struct ice_gtpu_eh ipv4;
> > +	struct ice_gtpu_eh ipv6;
> > +	struct ice_gtpu_eh ipv4_udp;
> > +	struct ice_gtpu_eh ipv6_udp;
> > +	struct ice_gtpu_eh ipv4_tcp;
> > +	struct ice_gtpu_eh ipv6_tcp;
> > +};
> > +
> 
> 
> I think you don't need to define a struct for each pattern and set/unset
> values for all of them. What you care about is just three items: the hdr,
> the hash field and the pattern type, so you could just define it as below:
> 
> struct ice_hash_gtpu_eh_ctx {
> 	uint32_t pkt_hdr;
> 	uint64_t hash_fld;
> 	uint64_t hdr_hint; /* e.g. hdr = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP */
> };
> 
> and then only check this hdr_hint when setting uplink/downlink:
> 
> if (hdr & pf->gtpu_eh_ctx->hdr_hint)
> 	ice_rem_rss_cfg(...);

  Because we also need to remember the default rule context for each pattern
  (the rules created at init time), we still need a per-pattern structure.

> 
> >   struct ice_pf {
> >   	struct ice_adapter *adapter; /* The adapter this PF associate to */
> >   	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> > @@ -381,6 +395,7 @@ struct ice_pf {
> >   	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
> >   	uint16_t fdir_qp_offset;
> >   	struct ice_fdir_info fdir; /* flow director info */
> > +	struct ice_hash_gtpu_eh gtpu_eh;
> >   	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
> >   	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
> >   	struct ice_hw_port_stats stats_offset;
> > diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
> > index e535e4b..dd70353 100644
> > --- a/drivers/net/ice/ice_hash.c
> > +++ b/drivers/net/ice/ice_hash.c
> > @@ -1232,6 +1232,117 @@ struct ice_hash_match_type
> ice_hash_type_list[] = {
> >   }
> >
> >   static int
> > +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld)
> > +{
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	struct ice_vsi *vsi = pf->main_vsi;
> > +	int ret;
> > +
> > +	/**
> > +	 * If header field contains GTPU_EH, store gtpu_eh context.
> > +	 * If header field contains GTPU_DWN/UP, remove existed gtpu_eh.
> > +	 */
> > +	if ((hdr & ICE_FLOW_SEG_HDR_GTPU_EH) &&
> > +	    (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
> > +		    ICE_FLOW_SEG_HDR_GTPU_UP)) == 0) {
> 
> 
> No need to check DWN/UP here; EH and DWN/UP are mutually exclusive, and
> they are also handled when the pattern is parsed.

 Ok. I thought EH and DWN/UP could co-exist.

> 
> 
> > +		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
> 
> 
> Alignment should match the open parenthesis. The same applies below.

 OK.

> 
> 
> > +			pf->gtpu_eh.ipv4_udp.pkt_hdr = hdr;
> > +			pf->gtpu_eh.ipv4_udp.hash_fld = fld;
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
> > +			pf->gtpu_eh.ipv6_udp.pkt_hdr = hdr;
> > +			pf->gtpu_eh.ipv6_udp.hash_fld = fld;
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
> > +			pf->gtpu_eh.ipv4_tcp.pkt_hdr = hdr;
> > +			pf->gtpu_eh.ipv4_tcp.hash_fld = fld;
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
> > +			pf->gtpu_eh.ipv6_tcp.pkt_hdr = hdr;
> > +			pf->gtpu_eh.ipv6_tcp.hash_fld = fld;
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
> > +			(hdr & (ICE_FLOW_SEG_HDR_UDP |
> > +				ICE_FLOW_SEG_HDR_TCP)) == 0) {
> > +			pf->gtpu_eh.ipv4.pkt_hdr = hdr;
> > +			pf->gtpu_eh.ipv4.hash_fld = fld;
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
> > +			(hdr & (ICE_FLOW_SEG_HDR_UDP |
> > +				ICE_FLOW_SEG_HDR_TCP)) == 0) {
> > +			pf->gtpu_eh.ipv6.pkt_hdr = hdr;
> > +			pf->gtpu_eh.ipv6.hash_fld = fld;
> > +		}
> > +	} else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
> > +		   ICE_FLOW_SEG_HDR_GTPU_UP)) {
> > +		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
> > +			if (pf->gtpu_eh.ipv4_udp.hash_fld &&
> > +				pf->gtpu_eh.ipv4_udp.pkt_hdr) {
> > +				ret = ice_rem_rss_cfg(hw, vsi->idx,
> > +					pf->gtpu_eh.ipv4_udp.hash_fld,
> > +					pf->gtpu_eh.ipv4_udp.pkt_hdr);
> > +				if (ret)
> > +					return -rte_errno;
> > +			}
> 
> 
> Is it better to use local variables and then call ice_rem_rss_cfg once
> after the if-else?

 Yes, you are right. I will clean up the code in the next version. Thanks.
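
 Something like the below for the DWN/UP branch (just a rough sketch of the
 idea, untested):

	uint64_t hash_fld = 0;
	uint32_t pkt_hdr = 0;

	if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && (hdr & ICE_FLOW_SEG_HDR_UDP)) {
		hash_fld = pf->gtpu_eh.ipv4_udp.hash_fld;
		pkt_hdr = pf->gtpu_eh.ipv4_udp.pkt_hdr;
	} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && (hdr & ICE_FLOW_SEG_HDR_UDP)) {
		hash_fld = pf->gtpu_eh.ipv6_udp.hash_fld;
		pkt_hdr = pf->gtpu_eh.ipv6_udp.pkt_hdr;
	} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && (hdr & ICE_FLOW_SEG_HDR_TCP)) {
		hash_fld = pf->gtpu_eh.ipv4_tcp.hash_fld;
		pkt_hdr = pf->gtpu_eh.ipv4_tcp.pkt_hdr;
	} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && (hdr & ICE_FLOW_SEG_HDR_TCP)) {
		hash_fld = pf->gtpu_eh.ipv6_tcp.hash_fld;
		pkt_hdr = pf->gtpu_eh.ipv6_tcp.pkt_hdr;
	} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
		/* UDP/TCP are handled above, so the extra
		 * (hdr & (UDP | TCP)) == 0 check can be dropped here.
		 */
		hash_fld = pf->gtpu_eh.ipv4.hash_fld;
		pkt_hdr = pf->gtpu_eh.ipv4.pkt_hdr;
	} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
		hash_fld = pf->gtpu_eh.ipv6.hash_fld;
		pkt_hdr = pf->gtpu_eh.ipv6.pkt_hdr;
	}

	if (hash_fld && pkt_hdr) {
		ret = ice_rem_rss_cfg(hw, vsi->idx, hash_fld, pkt_hdr);
		if (ret)
			return -rte_errno;
	}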

Br
Simei

> 
> 
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_UDP)) {
> > +			if (pf->gtpu_eh.ipv6_udp.hash_fld &&
> > +				pf->gtpu_eh.ipv6_udp.pkt_hdr) {
> > +				ret = ice_rem_rss_cfg(hw, vsi->idx,
> > +					pf->gtpu_eh.ipv6_udp.hash_fld,
> > +					pf->gtpu_eh.ipv6_udp.pkt_hdr);
> > +				if (ret)
> > +					return -rte_errno;
> > +			}
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
> > +			if (pf->gtpu_eh.ipv4_tcp.hash_fld &&
> > +				pf->gtpu_eh.ipv4_tcp.pkt_hdr) {
> > +				ret = ice_rem_rss_cfg(hw, vsi->idx,
> > +					pf->gtpu_eh.ipv4_tcp.hash_fld,
> > +					pf->gtpu_eh.ipv4_tcp.pkt_hdr);
> > +				if (ret)
> > +					return -rte_errno;
> > +			}
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
> > +			(hdr & ICE_FLOW_SEG_HDR_TCP)) {
> > +			if (pf->gtpu_eh.ipv6_tcp.hash_fld &&
> > +				pf->gtpu_eh.ipv6_tcp.pkt_hdr) {
> > +				ret = ice_rem_rss_cfg(hw, vsi->idx,
> > +					pf->gtpu_eh.ipv6_tcp.hash_fld,
> > +					pf->gtpu_eh.ipv6_tcp.pkt_hdr);
> > +				if (ret)
> > +					return -rte_errno;
> > +			}
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
> > +			(hdr & (ICE_FLOW_SEG_HDR_UDP |
> > +				ICE_FLOW_SEG_HDR_TCP)) == 0) {
> > +			if (pf->gtpu_eh.ipv4.hash_fld &&
> > +				pf->gtpu_eh.ipv4.pkt_hdr) {
> > +				ret = ice_rem_rss_cfg(hw, vsi->idx,
> > +					pf->gtpu_eh.ipv4.hash_fld,
> > +					pf->gtpu_eh.ipv4.pkt_hdr);
> > +				if (ret)
> > +					return -rte_errno;
> > +			}
> > +		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
> > +			(hdr & (ICE_FLOW_SEG_HDR_UDP |
> > +				ICE_FLOW_SEG_HDR_TCP)) == 0) {
> > +			if (pf->gtpu_eh.ipv6.hash_fld &&
> > +				pf->gtpu_eh.ipv6.pkt_hdr) {
> > +				ret = ice_rem_rss_cfg(hw, vsi->idx,
> > +					pf->gtpu_eh.ipv6.hash_fld,
> > +					pf->gtpu_eh.ipv6.pkt_hdr);
> > +				if (ret)
> > +					return -rte_errno;
> > +			}
> > +		}
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> >   ice_hash_create(struct ice_adapter *ad,
> >   		struct rte_flow *flow,
> >   		void *meta,
> > @@ -1248,6 +1359,10 @@ struct ice_hash_match_type ice_hash_type_list[]
> = {
> >   	uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds;
> >   	uint8_t hash_function = ((struct rss_meta *)meta)->hash_function;
> >
> > +	ret = ice_add_rss_cfg_pre(pf, headermask, hash_field);
> > +	if (ret)
> > +		return -rte_errno;
> > +
> >   	filter_ptr = rte_zmalloc("ice_rss_filter",
> >   				sizeof(struct ice_hash_flow_cfg), 0);
> >   	if (!filter_ptr) {
> > @@ -1297,6 +1412,28 @@ struct ice_hash_match_type ice_hash_type_list[]
> = {
> >   	return -rte_errno;
> >   }
> >
> > +static void
> > +ice_rem_rss_cfg_post(struct ice_pf *pf)
> > +{
> > +	pf->gtpu_eh.ipv4.hash_fld = 0;
> > +	pf->gtpu_eh.ipv4.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv6.hash_fld = 0;
> > +	pf->gtpu_eh.ipv6.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv4_udp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv4_udp.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv6_udp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv6_udp.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv4_tcp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0;
> > +
> > +	pf->gtpu_eh.ipv6_tcp.hash_fld = 0;
> > +	pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0;
> > +}
> > +
> >   static int
> >   ice_hash_destroy(struct ice_adapter *ad,
> >   		struct rte_flow *flow,
> > @@ -1334,6 +1471,8 @@ struct ice_hash_match_type ice_hash_type_list[]
> = {
> >   		}
> >   	}
> >
> > +	ice_rem_rss_cfg_post(pf);
> > +
> >   	rte_free(filter_ptr);
> >   	return 0;
> >

Thread overview: 8+ messages
2020-07-24  2:10 Simei Su
2020-07-24  7:13 ` Jeff Guo
2020-07-24 14:19   ` Su, Simei [this message]
2020-07-24 14:41 ` [dpdk-dev] [PATCH v2] " Simei Su
2020-07-26  3:13   ` [dpdk-dev] [PATCH v3] " Simei Su
2020-07-27  9:38     ` [dpdk-dev] [PATCH v4] " Simei Su
2020-07-28  8:44       ` [dpdk-dev] [PATCH v5] " Simei Su
2020-07-28 11:07         ` [dpdk-dev] [PATCH v6] " Simei Su
