From: Rahul Lakkireddy
To: dev@dpdk.org
Cc: Kumar Sanghvi, Nirranjan Kirubaharan
Date: Wed, 3 Feb 2016 14:02:25 +0530
Message-Id: <4b8071c8b872619a29dc6fdd316bcb422161f5ba.1454408702.git.rahul.lakkireddy@chelsio.com>
X-Mailer: git-send-email 2.5.3
Subject: [dpdk-dev] [PATCH 04/10] cxgbe: add control txq for communicating filtering info

Add a control txq with its own mempool to create control packets
containing filtering information to be communicated with firmware.

Signed-off-by: Rahul Lakkireddy
Signed-off-by: Kumar Sanghvi
---
 drivers/net/cxgbe/base/adapter.h        |  15 +++
 drivers/net/cxgbe/base/common.h         |   2 +
 drivers/net/cxgbe/base/t4_hw.c          |  25 ++++
 drivers/net/cxgbe/base/t4fw_interface.h |  64 ++++++++++
 drivers/net/cxgbe/cxgbe.h               |   1 +
 drivers/net/cxgbe/cxgbe_ethdev.c        |   3 +
 drivers/net/cxgbe/cxgbe_main.c          |  41 +++++++
 drivers/net/cxgbe/sge.c                 | 199 +++++++++++++++++++++++++++++++-
 8 files changed, 349 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 51896b1..ff8000f 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -44,6 +44,7 @@ enum {
 	MAX_ETH_QSETS = 64,           /* # of Ethernet Tx/Rx queue sets */
+	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
 };
 
 struct adapter;
@@ -271,10 +272,20 @@ struct sge_eth_txq {   /* state for an SGE Ethernet Tx queue */
 	unsigned int flags;           /* flags for state of the queue */
 } __rte_cache_aligned;
 
+struct sge_ctrl_txq {                /* State for an SGE control Tx queue */
+	struct sge_txq q;            /* txq */
+	struct adapter *adapter;     /* adapter associated with this queue */
+	rte_spinlock_t ctrlq_lock;   /* control queue lock */
+	u8 full;                     /* the Tx ring is full */
+	u64 txp;                     /* number of transmits */
+	struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
 struct sge {
 	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
 	struct sge_rspq fw_evtq __rte_cache_aligned;
+	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
 
 	u16 max_ethqsets;             /* # of available Ethernet queue sets */
 	u32 stat_len;                 /* length of status page at ring end */
@@ -556,12 +567,16 @@ void t4_free_sge_resources(struct adapter *adap);
 void t4_sge_tx_monitor_start(struct adapter *adap);
 void t4_sge_tx_monitor_stop(struct adapter *adap);
 int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 		     const struct pkt_gl *gl);
 int t4_sge_init(struct adapter *adap);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
 			 unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+			  struct rte_eth_dev *eth_dev, uint16_t queue_id,
+			  unsigned int iqid, int socket_id);
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
 		     struct rte_eth_dev *eth_dev, int intr_idx,
 		     struct sge_fl *fl, rspq_handler_t handler,
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index cf2e82d..2b39c10 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -311,6 +311,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int fl0id, unsigned int fl1id);
 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		   unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid);
 
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 884d2cf..de2e6b7 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -2124,6 +2124,31 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 }
 
 /**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ctrl_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				  V_FW_EQ_CTRL_CMD_PFN(pf) |
+				  V_FW_EQ_CTRL_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
  * t4_handle_fw_rpl - process a FW reply message
  * @adap: the adapter
  * @rpl: start of the FW message
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 8286ca1..ddbff89 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -178,6 +178,7 @@ enum fw_cmd_opcodes {
 	FW_PARAMS_CMD          = 0x08,
 	FW_IQ_CMD              = 0x10,
 	FW_EQ_ETH_CMD          = 0x12,
+	FW_EQ_CTRL_CMD         = 0x13,
 	FW_VI_CMD              = 0x14,
 	FW_VI_MAC_CMD          = 0x15,
 	FW_VI_RXMODE_CMD       = 0x16,
@@ -788,6 +789,69 @@ struct fw_eq_eth_cmd {
 #define G_FW_EQ_ETH_CMD_VIID(x) \
 	(((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
 
+struct fw_eq_ctrl_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 cmpliqid_eqid;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN		8
+#define V_FW_EQ_CTRL_CMD_PFN(x)		((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN		0
+#define V_FW_EQ_CTRL_CMD_VFN(x)		((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC		31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x)	((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC		V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE		30
+#define V_FW_EQ_CTRL_CMD_FREE(x)	((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE		V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART	28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x)	((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART	V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID	20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x)	((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID		0
+#define M_FW_EQ_CTRL_CMD_EQID		0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x)	((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+	(((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO	22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x)	((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO	V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE	20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE	0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x)	((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN	16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x)	((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID		0
+#define V_FW_EQ_CTRL_CMD_IQID(x)	((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN		23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x)	((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX		20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x)	((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH	16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x)	((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE		0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x)	((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
 enum fw_vi_func {
 	FW_VI_FUNC_ETH,
 };
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 0201c99..9ca4388 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -56,6 +56,7 @@ int link_start(struct port_info *pi);
 void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
 	       unsigned int cnt, unsigned int size, unsigned int iqe_size);
 int setup_sge_fwevtq(struct adapter *adapter);
+int setup_sge_ctrl_txq(struct adapter *adapter);
 void cfg_queues(struct rte_eth_dev *eth_dev);
 int cfg_queue_count(struct rte_eth_dev *eth_dev);
 int setup_rss(struct port_info *pi);
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 97ef152..2701bb6 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -393,6 +393,9 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 		if (err)
 			return err;
 		adapter->flags |= FW_QUEUE_BOUND;
+		err = setup_sge_ctrl_txq(adapter);
+		if (err)
+			return err;
 	}
 
 	err = cfg_queue_count(eth_dev);
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index b116a40..7a1cc13 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -123,6 +123,47 @@ out:
 	return 0;
 }
 
+/**
+ * Setup sge control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int err = 0, i = 0;
+
+	for_each_port(adapter, i) {
+		char name[RTE_ETH_NAME_MAX_LEN];
+		struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+		q->q.size = 1024;
+		err = t4_sge_alloc_ctrl_txq(adapter, q,
+					    adapter->eth_dev, i,
+					    s->fw_evtq.cntxt_id,
+					    rte_socket_id());
+		if (err) {
+			dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
Err: %d", + err); + goto out; + } + snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i); + q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size, + RTE_CACHE_LINE_SIZE, + RTE_MBUF_PRIV_ALIGN, + RTE_MBUF_DEFAULT_BUF_SIZE, + SOCKET_ID_ANY); + if (!q->mb_pool) { + dev_err(adapter, "Can't create ctrl pool for port: %d", + i); + err = -ENOMEM; + goto out; + } + } + return 0; +out: + t4_free_sge_resources(adapter); + return err; +} + int setup_sge_fwevtq(struct adapter *adapter) { struct sge *s = &adapter->sge; diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c index 3c62d03..bd4b381 100644 --- a/drivers/net/cxgbe/sge.c +++ b/drivers/net/cxgbe/sge.c @@ -84,6 +84,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, #define MAX_IMM_TX_PKT_LEN 256 /* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + +/* * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet * per mbuf buffer). We currently only support two sizes for 1500- and * 9000-byte MTUs. We could easily support more but there doesn't seem to be @@ -1234,6 +1239,129 @@ out_free: } /** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any mbufs to release. + */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @mbuf: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct rte_mbuf *mbuf) +{ + return mbuf->pkt_len <= MAX_CTRL_WR_LEN; +} + +/** + * inline_tx_mbuf: inline a packet's data into TX descriptors + * @q: the TX queue where the packet will be inlined + * @from: pointer to data portion of packet + * @to: pointer after cpl where data has to be inlined + * @len: length of data to inline + * + * Inline a packet's contents directly to TX descriptors, starting at + * the given position within the TX DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to, + int len) +{ + int left = RTE_PTR_DIFF(q->stat, *to); + + if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) { + rte_memcpy(*to, from, len); + *to = RTE_PTR_ADD(*to, len); + } else { + rte_memcpy(*to, from, left); + from = RTE_PTR_ADD(from, left); + left = len - left; + rte_memcpy((void *)q->desc, from, left); + *to = RTE_PTR_ADD((void *)q->desc, left); + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @mbuf: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. 
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+	unsigned int ndesc;
+	struct fw_wr_hdr *wr;
+	caddr_t dst;
+
+	if (unlikely(!is_imm(mbuf))) {
+		WARN_ON(1);
+		rte_pktmbuf_free(mbuf);
+		return -1;
+	}
+
+	reclaim_completed_tx_imm(&q->q);
+	ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+	t4_os_lock(&q->ctrlq_lock);
+
+	q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+	if (unlikely(q->full)) {
+		t4_os_unlock(&q->ctrlq_lock);
+		return -1;
+	}
+
+	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+	dst = (void *)wr;
+	inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+		       &dst, mbuf->data_len);
+
+	txq_advance(&q->q, ndesc);
+	if (unlikely(txq_avail(&q->q) < 64))
+		wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+	q->txp++;
+
+	ring_tx_db(q->adapter, &q->q);
+	t4_os_unlock(&q->ctrlq_lock);
+
+	rte_pktmbuf_free(mbuf);
+	return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+	int ret;
+
+	ret = ctrl_xmit(q, mbuf);
+	return ret;
+}
+
+/**
  * alloc_ring - allocate resources for an SGE descriptor ring
  * @dev: the PCI device's core device
  * @nelem: the number of descriptors
@@ -1949,6 +2077,63 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 	return 0;
 }
 
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+			  struct rte_eth_dev *eth_dev, uint16_t queue_id,
+			  unsigned int iqid, int socket_id)
+{
+	int ret, nentries;
+	struct fw_eq_ctrl_cmd c;
+	struct sge *s = &adap->sge;
+	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+	/* Add status entries */
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+		 eth_dev->driver->pci_drv.name, "ctrl_tx_ring",
+		 eth_dev->data->port_id, queue_id);
+	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+				 0, &txq->q.phys_addr,
+				 NULL, 0, queue_id,
+				 socket_id, z_name, z_name_sw);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+			    V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+			    V_FW_EQ_CTRL_CMD_VFN(0));
+	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+				 F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+	c.physeqid_pkd = htonl(0);
+	c.fetchszm_to_iqid =
+		htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+		      V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+		      F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+	c.dcaen_to_eqsize =
+		htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+		      V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+		      V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+	if (ret) {
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)));
+	txq->adapter = adap;
+	txq->full = 0;
+	return 0;
+}
+
 static void free_txq(struct sge_txq *q)
 {
 	q->cntxt_id = 0;
@@ -2043,7 +2228,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
  */
 void t4_free_sge_resources(struct adapter *adap)
 {
-	int i;
+	unsigned int i;
 	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
 	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
 
@@ -2060,6 +2245,18 @@ void t4_free_sge_resources(struct adapter *adap)
 		}
 	}
 
+	/* clean up control Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+		if (cq->q.desc) {
+			reclaim_completed_tx_imm(&cq->q);
+			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+					cq->q.cntxt_id);
+			free_txq(&cq->q);
+		}
+	}
+
 	if (adap->sge.fw_evtq.desc)
 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
 }
-- 
2.5.3
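For readers following the series: the patch above only adds the control queue
and its send path (ctrl_xmit()/t4_mgmt_tx()); the filter work requests that
travel over it arrive in later patches. A minimal sketch of how a caller might
push a work request through the new queue follows. The struct fw_filter_wr,
the FW_FILTER_WR opcode, and the helper name below are assumptions made for
illustration only and are not part of this patch.

	/* Illustrative sketch only -- not part of this patch. Assumes
	 * struct fw_filter_wr and FW_FILTER_WR are available from
	 * t4fw_interface.h.
	 */
	static int send_filter_wr_sketch(struct adapter *adap, unsigned int port)
	{
		struct sge_ctrl_txq *ctrlq = &adap->sge.ctrlq[port];
		struct rte_mbuf *mbuf;
		struct fw_filter_wr *fwr;

		/* Control packets are drawn from the queue's private mempool. */
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf)
			return -ENOMEM;
		mbuf->data_len = sizeof(*fwr);
		mbuf->pkt_len = mbuf->data_len;

		/* Build the work request in place; ctrl_xmit() sends it as
		 * immediate data inlined into the control Tx ring.
		 */
		fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
		memset(fwr, 0, sizeof(*fwr));
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
		fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
		/* ... remaining filter fields would be filled here ... */

		/* t4_mgmt_tx() inlines the mbuf into the ring and frees it. */
		return t4_mgmt_tx(ctrlq, mbuf);
	}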