patches for DPDK stable branches
 help / color / mirror / Atom feed
* [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks
@ 2020-06-12 22:07 Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 1/5] net/cxgbe: fix CLIP leak in filter error path Rahul Lakkireddy
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Rahul Lakkireddy @ 2020-06-12 22:07 UTC (permalink / raw)
  To: dev; +Cc: stable, nirranjan

This series of patches fix various hardware resource leaks in flow
create failure and flow destroy paths.

Patch 1 fixes Compressed Local IP (CLIP) entry leaks.

Patch 2 fixes Layer 2 Table (L2T) entry leaks.

Patch 3 fixes double Multi Port Switch (MPS) entry allocations due
to flow validate and create, but only freed once during flow destroy.

Patch 4 fixes Source MAC Table (SMT) entry leaks.

Patch 5 fixes flow validation errors seen due to default masks set
for unrequested fields.

Thanks,
Rahul

Rahul Lakkireddy (5):
  net/cxgbe: fix CLIP leak in filter error path
  net/cxgbe: fix L2T leak in filter error and free path
  net/cxgbe: fix double MPS alloc due to flow validate and create
  net/cxgbe: fix SMT leak in filter error and free path
  net/cxgbe: ignore flow default masks for unrequested fields

 drivers/net/cxgbe/cxgbe_filter.c | 158 ++++++++++++++++++-------------
 drivers/net/cxgbe/cxgbe_filter.h |   4 +-
 drivers/net/cxgbe/cxgbe_flow.c   | 137 +++++++++++++++------------
 drivers/net/cxgbe/smt.c          |   6 ++
 drivers/net/cxgbe/smt.h          |   1 +
 5 files changed, 180 insertions(+), 126 deletions(-)

-- 
2.24.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [dpdk-stable] [PATCH 1/5] net/cxgbe: fix CLIP leak in filter error path
  2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
@ 2020-06-12 22:07 ` Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 2/5] net/cxgbe: fix L2T leak in filter error and free path Rahul Lakkireddy
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Rahul Lakkireddy @ 2020-06-12 22:07 UTC (permalink / raw)
  To: dev; +Cc: stable, nirranjan

Free up the Compressed Local IP (CLIP) entry properly in the filter
creation failure path. Also consolidate cleanup of all the various
tables into a common function and invoke it from both the wild-card
and exact-match filter paths.

Fixes: af44a577988b ("net/cxgbe: support to offload flows to HASH region")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/cxgbe_filter.c | 68 +++++++++++++++-----------------
 1 file changed, 31 insertions(+), 37 deletions(-)

diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 27e96c73e..45602d468 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -284,6 +284,22 @@ int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
 	return pos < size ? pos : -1;
 }
 
+/**
+ * Clear a filter and release any of its resources that we own.  This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct filter_entry *f)
+{
+	if (f->clipt)
+		cxgbe_clip_release(f->dev, f->clipt);
+
+	/* The zeroing of the filter rule below clears the filter valid,
+	 * pending, locked flags etc. so it's all we need for
+	 * this operation.
+	 */
+	memset(f, 0, sizeof(*f));
+}
+
 /**
  * Construct hash filter ntuple.
  */
@@ -583,7 +599,7 @@ static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
 
 	f = t4_os_alloc(sizeof(*f));
 	if (!f)
-		goto out_err;
+		return -ENOMEM;
 
 	f->fs = *fs;
 	f->ctx = ctx;
@@ -631,7 +647,7 @@ static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
 		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
 		if (!mbuf) {
 			ret = -ENOMEM;
-			goto free_clip;
+			goto free_atid;
 		}
 
 		mbuf->data_len = size;
@@ -661,33 +677,15 @@ static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
 	t4_mgmt_tx(ctrlq, mbuf);
 	return 0;
 
-free_clip:
-	cxgbe_clip_release(f->dev, f->clipt);
 free_atid:
 	cxgbe_free_atid(t, atid);
 
 out_err:
+	clear_filter(f);
 	t4_os_free(f);
 	return ret;
 }
 
-/**
- * Clear a filter and release any of its resources that we own.  This also
- * clears the filter's "pending" status.
- */
-static void clear_filter(struct filter_entry *f)
-{
-	if (f->clipt)
-		cxgbe_clip_release(f->dev, f->clipt);
-
-	/*
-	 * The zeroing of the filter rule below clears the filter valid,
-	 * pending, locked flags etc. so it's all we need for
-	 * this operation.
-	 */
-	memset(f, 0, sizeof(*f));
-}
-
 /**
  * t4_mk_filtdelwr - create a delete filter WR
  * @adap: adapter context
@@ -1070,16 +1068,6 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
 		return ret;
 	}
 
-	/*
-	 * Allocate a clip table entry only if we have non-zero IPv6 address
-	 */
-	if (chip_ver > CHELSIO_T5 && fs->type &&
-	    memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
-		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
-		if (!f->clipt)
-			goto free_tid;
-	}
-
 	/*
 	 * Convert the filter specification into our internal format.
 	 * We copy the PF/VF specification into the Outer VLAN field
@@ -1090,6 +1078,16 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
 	f->fs.iq = iq;
 	f->dev = dev;
 
+	/* Allocate a clip table entry only if we have non-zero IPv6 address. */
+	if (chip_ver > CHELSIO_T5 && f->fs.type &&
+	    memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
+		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
+		if (!f->clipt) {
+			ret = -ENOMEM;
+			goto free_tid;
+		}
+	}
+
 	iconf = adapter->params.tp.ingress_config;
 
 	/* Either PFVF or OVLAN can be active, but not both
@@ -1192,6 +1190,7 @@ void cxgbe_hash_filter_rpl(struct adapter *adap,
 		}
 
 		cxgbe_free_atid(t, ftid);
+		clear_filter(f);
 		t4_os_free(f);
 	}
 
@@ -1416,13 +1415,8 @@ void cxgbe_hash_del_filter_rpl(struct adapter *adap,
 	}
 
 	ctx = f->ctx;
-	f->ctx = NULL;
-
-	f->valid = 0;
-
-	if (f->clipt)
-		cxgbe_clip_release(f->dev, f->clipt);
 
+	clear_filter(f);
 	cxgbe_remove_tid(t, 0, tid, 0);
 	t4_os_free(f);
 
-- 
2.24.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [dpdk-stable] [PATCH 2/5] net/cxgbe: fix L2T leak in filter error and free path
  2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 1/5] net/cxgbe: fix CLIP leak in filter error path Rahul Lakkireddy
@ 2020-06-12 22:07 ` Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 3/5] net/cxgbe: fix double MPS alloc due to flow validate and create Rahul Lakkireddy
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Rahul Lakkireddy @ 2020-06-12 22:07 UTC (permalink / raw)
  To: dev; +Cc: stable, nirranjan

Free up Layer 2 Table (L2T) entry properly during filter create
failure and filter delete.

Fixes: 1decc62b1cbe ("net/cxgbe: add flow operations to offload VLAN actions")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/cxgbe_filter.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 45602d468..06233e41e 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -293,6 +293,9 @@ static void clear_filter(struct filter_entry *f)
 	if (f->clipt)
 		cxgbe_clip_release(f->dev, f->clipt);
 
+	if (f->l2t)
+		cxgbe_l2t_release(f->l2t);
+
 	/* The zeroing of the filter rule below clears the filter valid,
 	 * pending, locked flags etc. so it's all we need for
 	 * this operation.
@@ -755,20 +758,6 @@ static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
 	unsigned int port_id = ethdev2pinfo(dev)->port_id;
 	int ret;
 
-	/*
-	 * If the new filter requires loopback Destination MAC and/or VLAN
-	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
-	 * the filter.
-	 */
-	if (f->fs.newvlan || f->fs.newdmac) {
-		/* allocate L2T entry for new filter */
-		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
-						   f->fs.eport, f->fs.dmac);
-
-		if (!f->l2t)
-			return -ENOMEM;
-	}
-
 	/* If the new filter requires Source MAC rewriting then we need to
 	 * allocate a SMT entry for the filter
 	 */
@@ -1088,6 +1077,19 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
 		}
 	}
 
+	/* If the new filter requires loopback Destination MAC and/or VLAN
+	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+	 * the filter.
+	 */
+	if (f->fs.newvlan || f->fs.newdmac) {
+		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
+						   f->fs.eport, f->fs.dmac);
+		if (!f->l2t) {
+			ret = -ENOMEM;
+			goto free_tid;
+		}
+	}
+
 	iconf = adapter->params.tp.ingress_config;
 
 	/* Either PFVF or OVLAN can be active, but not both
-- 
2.24.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [dpdk-stable] [PATCH 3/5] net/cxgbe: fix double MPS alloc due to flow validate and create
  2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 1/5] net/cxgbe: fix CLIP leak in filter error path Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 2/5] net/cxgbe: fix L2T leak in filter error and free path Rahul Lakkireddy
@ 2020-06-12 22:07 ` Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 4/5] net/cxgbe: fix SMT leak in filter error and free path Rahul Lakkireddy
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Rahul Lakkireddy @ 2020-06-12 22:07 UTC (permalink / raw)
  To: dev; +Cc: stable, nirranjan

The Multi Port Switch (MPS) entry is allocated twice when both
flow validate and create are invoked, but it is only freed once
during flow destroy. Avoid the double allocation by moving the MPS
entry allocation closer to the point where the filter create request
is sent to hardware, so it is skipped for filter validate requests.

Fixes: fefee7a619a4 ("net/cxgbe: add flow ops to match based on dest MAC")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/cxgbe_filter.c | 32 ++++++++++++++++++++++++++++++++
 drivers/net/cxgbe/cxgbe_filter.h |  4 +++-
 drivers/net/cxgbe/cxgbe_flow.c   | 28 +++-------------------------
 3 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 06233e41e..317830f58 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -8,6 +8,7 @@
 #include "base/t4_tcb.h"
 #include "base/t4_regs.h"
 #include "cxgbe_filter.h"
+#include "mps_tcam.h"
 #include "clip_tbl.h"
 #include "l2t.h"
 #include "smt.h"
@@ -290,12 +291,17 @@ int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
  */
 static void clear_filter(struct filter_entry *f)
 {
+	struct port_info *pi = ethdev2pinfo(f->dev);
+
 	if (f->clipt)
 		cxgbe_clip_release(f->dev, f->clipt);
 
 	if (f->l2t)
 		cxgbe_l2t_release(f->l2t);
 
+	if (f->fs.mask.macidx)
+		cxgbe_mpstcam_remove(pi, f->fs.val.macidx);
+
 	/* The zeroing of the filter rule below clears the filter valid,
 	 * pending, locked flags etc. so it's all we need for
 	 * this operation.
@@ -609,6 +615,19 @@ static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
 	f->dev = dev;
 	f->fs.iq = iq;
 
+	/* Allocate MPS TCAM entry to match Destination MAC. */
+	if (f->fs.mask.macidx) {
+		int idx;
+
+		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
+		if (idx <= 0) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
+
+		f->fs.val.macidx = idx;
+	}
+
 	/*
 	 * If the new filter requires loopback Destination MAC and/or VLAN
 	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
@@ -1067,6 +1086,19 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
 	f->fs.iq = iq;
 	f->dev = dev;
 
+	/* Allocate MPS TCAM entry to match Destination MAC. */
+	if (f->fs.mask.macidx) {
+		int idx;
+
+		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
+		if (idx <= 0) {
+			ret = -ENOMEM;
+			goto free_tid;
+		}
+
+		f->fs.val.macidx = idx;
+	}
+
 	/* Allocate a clip table entry only if we have non-zero IPv6 address. */
 	if (chip_ver > CHELSIO_T5 && f->fs.type &&
 	    memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index e79c052de..46ebf8333 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -69,8 +69,10 @@ struct ch_filter_tuple {
 	uint16_t lport;		/* local port */
 	uint16_t fport;		/* foreign port */
 
+	uint8_t dmac[6];        /* Destination MAC to match */
+
 	/* reservations for future additions */
-	uint8_t rsvd[12];
+	uint8_t rsvd[6];
 };
 
 /*
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 166c39ba5..dd8ee7bbd 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -194,20 +194,9 @@ ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
 					  "src mac filtering not supported");
 
 	if (!rte_is_zero_ether_addr(&mask->dst)) {
-		const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
-		const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
-		struct rte_flow *flow = (struct rte_flow *)fs->private;
-		struct port_info *pi = (struct port_info *)
-					(flow->dev->data->dev_private);
-		int idx;
-
-		idx = cxgbe_mpstcam_alloc(pi, addr, m);
-		if (idx <= 0)
-			return rte_flow_error_set(e, idx,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
-						  NULL, "unable to allocate mac"
-						  " entry in h/w");
-		CXGBE_FILL_FS(idx, 0x1ff, macidx);
+		CXGBE_FILL_FS(0, 0x1ff, macidx);
+		CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
+				     dmac);
 	}
 
 	CXGBE_FILL_FS(be16_to_cpu(spec->type),
@@ -1212,17 +1201,6 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		return ctx.result;
 	}
 
-	fs = &flow->fs;
-	if (fs->mask.macidx) {
-		struct port_info *pi = (struct port_info *)
-					(dev->data->dev_private);
-		int ret;
-
-		ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
-		if (!ret)
-			return ret;
-	}
-
 	return 0;
 }
 
-- 
2.24.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [dpdk-stable] [PATCH 4/5] net/cxgbe: fix SMT leak in filter error and free path
  2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
                   ` (2 preceding siblings ...)
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 3/5] net/cxgbe: fix double MPS alloc due to flow validate and create Rahul Lakkireddy
@ 2020-06-12 22:07 ` Rahul Lakkireddy
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 5/5] net/cxgbe: ignore flow default masks for unrequested fields Rahul Lakkireddy
  2020-06-17 13:33 ` [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Ferruh Yigit
  5 siblings, 0 replies; 7+ messages in thread
From: Rahul Lakkireddy @ 2020-06-12 22:07 UTC (permalink / raw)
  To: dev; +Cc: stable, nirranjan

Free up Source MAC Table (SMT) entry properly during filter create
failure and filter delete.

Fixes: 993541b2fa4f ("net/cxgbe: support flow API for source MAC rewrite")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/cxgbe_filter.c | 28 ++++++++++++++--------------
 drivers/net/cxgbe/smt.c          |  6 ++++++
 drivers/net/cxgbe/smt.h          |  1 +
 3 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 317830f58..6066da7db 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -302,6 +302,9 @@ static void clear_filter(struct filter_entry *f)
 	if (f->fs.mask.macidx)
 		cxgbe_mpstcam_remove(pi, f->fs.val.macidx);
 
+	if (f->smt)
+		cxgbe_smt_release(f->smt);
+
 	/* The zeroing of the filter rule below clears the filter valid,
 	 * pending, locked flags etc. so it's all we need for
 	 * this operation.
@@ -777,20 +780,6 @@ static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
 	unsigned int port_id = ethdev2pinfo(dev)->port_id;
 	int ret;
 
-	/* If the new filter requires Source MAC rewriting then we need to
-	 * allocate a SMT entry for the filter
-	 */
-	if (f->fs.newsmac) {
-		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
-		if (!f->smt) {
-			if (f->l2t) {
-				cxgbe_l2t_release(f->l2t);
-				f->l2t = NULL;
-			}
-			return -ENOMEM;
-		}
-	}
-
 	ctrlq = &adapter->sge.ctrlq[port_id];
 	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
 	if (!mbuf) {
@@ -1122,6 +1111,17 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
 		}
 	}
 
+	/* If the new filter requires Source MAC rewriting then we need to
+	 * allocate a SMT entry for the filter
+	 */
+	if (f->fs.newsmac) {
+		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
+		if (!f->smt) {
+			ret = -ENOMEM;
+			goto free_tid;
+		}
+	}
+
 	iconf = adapter->params.tp.ingress_config;
 
 	/* Either PFVF or OVLAN can be active, but not both
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index e8f38676e..b7b5a4a02 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -193,6 +193,12 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
 	return t4_smt_alloc_switching(dev, 0x0, smac);
 }
 
+void cxgbe_smt_release(struct smt_entry *e)
+{
+	if (rte_atomic32_read(&e->refcnt))
+		rte_atomic32_dec(&e->refcnt);
+}
+
 /**
  * Initialize Source MAC Table
  */
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index be1fab8ba..92c63c876 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -39,6 +39,7 @@ void t4_cleanup_smt(struct adapter *adap);
 void cxgbe_do_smt_write_rpl(struct adapter *adap,
 			    const struct cpl_smt_write_rpl *rpl);
 struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac);
+void cxgbe_smt_release(struct smt_entry *e);
 
 #endif  /* __CXGBE_SMT_H_ */
 
-- 
2.24.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [dpdk-stable] [PATCH 5/5] net/cxgbe: ignore flow default masks for unrequested fields
  2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
                   ` (3 preceding siblings ...)
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 4/5] net/cxgbe: fix SMT leak in filter error and free path Rahul Lakkireddy
@ 2020-06-12 22:07 ` Rahul Lakkireddy
  2020-06-17 13:33 ` [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Ferruh Yigit
  5 siblings, 0 replies; 7+ messages in thread
From: Rahul Lakkireddy @ 2020-06-12 22:07 UTC (permalink / raw)
  To: dev; +Cc: stable, nirranjan

commit 536db938a444 ("net/cxgbe: add devargs to control filtermode and
filtermask") allows configuring hardware to select a specific combination
of header fields to match in the incoming packets. However, the default
mask is set for all fields in the requested pattern items, even for
fields that are not explicitly part of the configured combination, which
results in validation errors. To prevent this, ignore setting the
default masks for the unrequested fields; the hardware will then also
ignore them during validation, accordingly. Also, tweak the filter spec
before finalizing the masks.

Fixes: 536db938a444 ("net/cxgbe: add devargs to control filtermode and filtermask")
Cc: stable@dpdk.org

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/cxgbe_flow.c | 109 ++++++++++++++++++++++-----------
 1 file changed, 74 insertions(+), 35 deletions(-)

diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index dd8ee7bbd..f7c4f3696 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -188,19 +188,22 @@ ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
 		return 0;
 
 	/* we don't support SRC_MAC filtering*/
-	if (!rte_is_zero_ether_addr(&mask->src))
+	if (!rte_is_zero_ether_addr(&spec->src) ||
+	    (umask && !rte_is_zero_ether_addr(&umask->src)))
 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "src mac filtering not supported");
 
-	if (!rte_is_zero_ether_addr(&mask->dst)) {
+	if (!rte_is_zero_ether_addr(&spec->dst) ||
+	    (umask && !rte_is_zero_ether_addr(&umask->dst))) {
 		CXGBE_FILL_FS(0, 0x1ff, macidx);
 		CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
 				     dmac);
 	}
 
-	CXGBE_FILL_FS(be16_to_cpu(spec->type),
-		      be16_to_cpu(mask->type), ethtype);
+	if (spec->type || (umask && umask->type))
+		CXGBE_FILL_FS(be16_to_cpu(spec->type),
+			      be16_to_cpu(mask->type), ethtype);
 
 	return 0;
 }
@@ -224,7 +227,8 @@ ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
 					  item,
 					  "port index up to 0x7 is supported");
 
-	CXGBE_FILL_FS(val->index, mask->index, iport);
+	if (val->index || (umask && umask->index))
+		CXGBE_FILL_FS(val->index, mask->index, iport);
 
 	return 0;
 }
@@ -265,24 +269,24 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
 	if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
 		CXGBE_FILL_FS(1, 1, ovlan_vld);
 		if (spec) {
-			CXGBE_FILL_FS(be16_to_cpu(spec->tci),
-				      be16_to_cpu(mask->tci), ovlan);
-
+			if (spec->tci || (umask && umask->tci))
+				CXGBE_FILL_FS(be16_to_cpu(spec->tci),
+					      be16_to_cpu(mask->tci), ovlan);
 			fs->mask.ethtype = 0;
 			fs->val.ethtype = 0;
 		}
 	} else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
 		CXGBE_FILL_FS(1, 1, ivlan_vld);
 		if (spec) {
-			CXGBE_FILL_FS(be16_to_cpu(spec->tci),
-				      be16_to_cpu(mask->tci), ivlan);
-
+			if (spec->tci || (umask && umask->tci))
+				CXGBE_FILL_FS(be16_to_cpu(spec->tci),
+					      be16_to_cpu(mask->tci), ivlan);
 			fs->mask.ethtype = 0;
 			fs->val.ethtype = 0;
 		}
 	}
 
-	if (spec)
+	if (spec && (spec->inner_type || (umask && umask->inner_type)))
 		CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
 			      be16_to_cpu(mask->inner_type), ethtype);
 
@@ -328,7 +332,8 @@ ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
 					  item,
 					  "VF ID > MAX(255)");
 
-	CXGBE_FILL_FS(val->id, mask->id, vf);
+	if (val->id || (umask && umask->id))
+		CXGBE_FILL_FS(val->id, mask->id, vf);
 
 	return 0;
 }
@@ -352,10 +357,15 @@ ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
 	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
 	if (!val)
 		return 0;
-	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
-		      be16_to_cpu(mask->hdr.src_port), fport);
-	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
-		      be16_to_cpu(mask->hdr.dst_port), lport);
+
+	if (val->hdr.src_port || (umask && umask->hdr.src_port))
+		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+			      be16_to_cpu(mask->hdr.src_port), fport);
+
+	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
+		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+			      be16_to_cpu(mask->hdr.dst_port), lport);
+
 	return 0;
 }
 
@@ -380,10 +390,15 @@ ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
 	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
 	if (!val)
 		return 0;
-	CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
-		      be16_to_cpu(mask->hdr.src_port), fport);
-	CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
-		      be16_to_cpu(mask->hdr.dst_port), lport);
+
+	if (val->hdr.src_port || (umask && umask->hdr.src_port))
+		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+			      be16_to_cpu(mask->hdr.src_port), fport);
+
+	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
+		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+			      be16_to_cpu(mask->hdr.dst_port), lport);
+
 	return 0;
 }
 
@@ -411,10 +426,21 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
 	if (!val)
 		return 0; /* ipv4 wild card */
 
-	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
-	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
-	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
-	CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
+	if (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))
+		CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,
+			      proto);
+
+	if (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))
+		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
+				     lip);
+
+	if (val->hdr.src_addr || (umask && umask->hdr.src_addr))
+		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
+				     fip);
+
+	if (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))
+		CXGBE_FILL_FS(val->hdr.type_of_service,
+			      mask->hdr.type_of_service, tos);
 
 	return 0;
 }
@@ -428,6 +454,7 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv6 *umask = item->mask;
 	const struct rte_flow_item_ipv6 *mask;
 	u32 vtc_flow, vtc_flow_mask;
+	u8 z[16] = { 0 };
 
 	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
 
@@ -448,17 +475,28 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
 	if (!val)
 		return 0; /* ipv6 wild card */
 
-	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+	if (val->hdr.proto || (umask && umask->hdr.proto))
+		CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
 
 	vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
-	CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
-		      RTE_IPV6_HDR_TC_SHIFT,
-		      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
-		      RTE_IPV6_HDR_TC_SHIFT,
-		      tos);
-
-	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
-	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+	if (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))
+		CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+			      RTE_IPV6_HDR_TC_SHIFT,
+			      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+			      RTE_IPV6_HDR_TC_SHIFT,
+			      tos);
+
+	if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
+	    (umask &&
+	     memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
+		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
+				     lip);
+
+	if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
+	    (umask &&
+	     memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
+		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
+				     fip);
 
 	return 0;
 }
@@ -1051,8 +1089,8 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
 		}
 	}
 
-	cxgbe_fill_filter_region(adap, &flow->fs);
 	cxgbe_tweak_filter_spec(adap, &flow->fs);
+	cxgbe_fill_filter_region(adap, &flow->fs);
 
 	return 0;
 }
@@ -1310,6 +1348,7 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,
 
 	flow->item_parser = parseitem;
 	flow->dev = dev;
+	flow->fs.private = (void *)flow;
 
 	ret = cxgbe_flow_parse(flow, attr, item, action, e);
 	if (ret) {
-- 
2.24.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks
  2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
                   ` (4 preceding siblings ...)
  2020-06-12 22:07 ` [dpdk-stable] [PATCH 5/5] net/cxgbe: ignore flow default masks for unrequested fields Rahul Lakkireddy
@ 2020-06-17 13:33 ` Ferruh Yigit
  5 siblings, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2020-06-17 13:33 UTC (permalink / raw)
  To: Rahul Lakkireddy, dev; +Cc: stable, nirranjan

On 6/12/2020 11:07 PM, Rahul Lakkireddy wrote:
> This series of patches fix various hardware resource leaks in flow
> create failure and flow destroy paths.
> 
> Patch 1 fixes Compressed Local IP (CLIP) entry leaks.
> 
> Patch 2 fixes Layer 2 Table (L2T) entry leaks.
> 
> Patch 3 fixes double Multi Port Switch (MPS) entry allocations due
> to flow validate and create, but only freed once during flow destroy.
> 
> Patch 4 fixes Source MAC Table (SMT) entry leaks.
> 
> Patch 5 fixes flow validation errors seen due to default masks set
> for unrequested fields.
> 
> Thanks,
> Rahul
> 
> Rahul Lakkireddy (5):
>   net/cxgbe: fix CLIP leak in filter error path
>   net/cxgbe: fix L2T leak in filter error and free path
>   net/cxgbe: fix double MPS alloc due to flow validate and create
>   net/cxgbe: fix SMT leak in filter error and free path
>   net/cxgbe: ignore flow default masks for unrequested fields

Series applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2020-06-17 13:33 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-12 22:07 [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Rahul Lakkireddy
2020-06-12 22:07 ` [dpdk-stable] [PATCH 1/5] net/cxgbe: fix CLIP leak in filter error path Rahul Lakkireddy
2020-06-12 22:07 ` [dpdk-stable] [PATCH 2/5] net/cxgbe: fix L2T leak in filter error and free path Rahul Lakkireddy
2020-06-12 22:07 ` [dpdk-stable] [PATCH 3/5] net/cxgbe: fix double MPS alloc due to flow validate and create Rahul Lakkireddy
2020-06-12 22:07 ` [dpdk-stable] [PATCH 4/5] net/cxgbe: fix SMT leak in filter error and free path Rahul Lakkireddy
2020-06-12 22:07 ` [dpdk-stable] [PATCH 5/5] net/cxgbe: ignore flow default masks for unrequested fields Rahul Lakkireddy
2020-06-17 13:33 ` [dpdk-stable] [PATCH 0/5] net/cxgbe: fix rte_flow related hardware resource leaks Ferruh Yigit

patches for DPDK stable branches

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/stable/0 stable/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 stable stable/ https://inbox.dpdk.org/stable \
		stable@dpdk.org
	public-inbox-index stable

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.stable


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git