From mboxrd@z Thu Jan  1 00:00:00 1970
From: Xiaoyun wang
To: dev@dpdk.org
Date: Thu, 10 Oct 2019 22:51:58 +0800
Message-ID: <258e66e849f5ac6303de6c6ef6eb94ee4b66433c.1570718029.git.cloud.wangxiaoyun@huawei.com>
X-Mailer: git-send-email 1.8.3.1
Subject: [dpdk-dev] [PATCH v4 14/19] net/hinic: support inner L3 checksum offload

This patch adds support for inner L3 checksum offload on VXLAN
packets and reworks the Rx checksum offload handling.

Signed-off-by: Xiaoyun wang
---
 drivers/net/hinic/hinic_pmd_ethdev.c |   6 +-
 drivers/net/hinic/hinic_pmd_ethdev.h |   1 +
 drivers/net/hinic/hinic_pmd_rx.c     |  29 ++---
 drivers/net/hinic/hinic_pmd_rx.h     |   2 +-
 drivers/net/hinic/hinic_pmd_tx.c     | 203 +++++++++++++++++++++--------------
 drivers/net/hinic/hinic_pmd_tx.h     |   2 +-
 6 files changed, 146 insertions(+), 97 deletions(-)
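For context, here is a minimal sketch (not part of the patch, and
ignored by git-am) of how an application would request the Tx offloads
this patch implements for a VXLAN packet, using the 19.11-era mbuf
API; the header lengths are illustrative assumptions:

#include <rte_mbuf.h>

static void set_vxlan_tx_offload(struct rte_mbuf *m)
{
	/* outer Ethernet(14) + outer IPv4(20); per DPDK convention,
	 * l2_len covers outer UDP(8) + VXLAN(8) + inner Ethernet(14)
	 */
	m->outer_l2_len = 14;
	m->outer_l3_len = 20;
	m->l2_len = 8 + 8 + 14;
	m->l3_len = 20;	/* inner IPv4 */
	m->l4_len = 20;	/* inner TCP */

	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		       PKT_TX_TCP_CKSUM;
}
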
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index 48ec467..b2cc0e4 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -255,7 +255,7 @@ static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
  * specific event.
  *
  * @param: The address of parameter (struct rte_eth_dev *) regsitered before.
- **/
+ */
 static void hinic_dev_interrupt_handler(void *param)
 {
 	struct rte_eth_dev *dev = param;
@@ -1088,7 +1088,7 @@ static void hinic_rx_queue_release(void *queue)
 	nic_dev = rxq->nic_dev;
 
 	/* free rxq_pkt mbuf */
-	hinic_free_all_rx_skbs(rxq);
+	hinic_free_all_rx_mbufs(rxq);
 
 	/* free rxq_cqe, rxq_info */
 	hinic_free_rx_resources(rxq);
@@ -1120,7 +1120,7 @@ static void hinic_tx_queue_release(void *queue)
 	nic_dev = txq->nic_dev;
 
 	/* free txq_pkt mbuf */
-	hinic_free_all_tx_skbs(txq);
+	hinic_free_all_tx_mbufs(txq);
 
 	/* free txq_info */
 	hinic_free_tx_resources(txq);
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
index dd96667..3e3f3b3 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.h
+++ b/drivers/net/hinic/hinic_pmd_ethdev.h
@@ -178,6 +178,7 @@ struct hinic_nic_dev {
 	 * vf: the same with associate pf
 	 */
 	u32 default_cos;
+	u32 rx_csum_en;
 
 	struct hinic_filter_info filter;
 	struct hinic_ntuple_filter_list filter_ntuple_list;
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 08e02ae..a9f3962 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -309,7 +309,6 @@ static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq)
 {
 	size_t cqe_mem_size;
 
-	/* allocate continuous cqe memory for saving number of memory zone */
 	cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
 	rxq->cqe_start_vaddr =
 		dma_zalloc_coherent(rxq->nic_dev->hwdev,
@@ -421,7 +420,7 @@ void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
 		if (nic_dev->rxqs[q_id] == NULL)
 			continue;
 
-		hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
+		hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
 		hinic_free_rx_resources(nic_dev->rxqs[q_id]);
 		kfree(nic_dev->rxqs[q_id]);
 		nic_dev->rxqs[q_id] = NULL;
@@ -435,11 +434,11 @@ void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
 	u16 q_id;
 
 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
-		hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
+		hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
 }
 
 static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
-				 struct rte_mbuf *head_skb,
+				 struct rte_mbuf *head_mbuf,
 				 u32 remain_pkt_len)
 {
 	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
@@ -462,11 +461,11 @@ static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
 		cur_mbuf->data_len = (u16)pkt_len;
 		cur_mbuf->next = NULL;
 
-		head_skb->pkt_len += cur_mbuf->data_len;
-		head_skb->nb_segs++;
+		head_mbuf->pkt_len += cur_mbuf->data_len;
+		head_mbuf->nb_segs++;
 
 		if (!rxm)
-			head_skb->next = cur_mbuf;
+			head_mbuf->next = cur_mbuf;
 		else
 			rxm->next = cur_mbuf;
 
@@ -658,7 +657,6 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 	struct rte_eth_rss_conf rss_conf =
 		dev->data->dev_conf.rx_adv_conf.rss_conf;
-	u32 csum_en = 0;
 	int err;
 
 	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
@@ -678,9 +676,10 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 
 	/* Enable both L3/L4 rx checksum offload */
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
-		csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
-	err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
+	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+					HINIC_RX_CSUM_OFFLOAD_EN);
 	if (err)
 		goto rx_csum_ofl_err;
@@ -703,7 +702,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
 	}
 }
 
-void hinic_free_all_rx_skbs(struct hinic_rxq *rxq)
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq)
 {
 	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
 	struct hinic_rx_info *rx_info;
@@ -781,6 +780,10 @@ static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
 {
 	uint32_t checksum_err;
 	uint64_t flags;
+	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+
+	if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
+		return PKT_RX_IP_CKSUM_UNKNOWN;
 
 	/* most case checksum is ok */
 	checksum_err = HINIC_GET_RX_CSUM_ERR(status);
@@ -999,8 +1002,8 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
 			rxm->data_len = rx_buf_len;
 			rxm->pkt_len = rx_buf_len;
 
-			/* if jumbo use multi-wqebb update ci,
-			 * recv_jumbo_pkt will also update ci
+			/* if receive jumbo, updating ci will be done by
+			 * hinic_recv_jumbo_pkt function.
 			 */
 			HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);
 			wqebb_cnt = 0;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index fe2735b..5cd17ec 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -105,7 +105,7 @@ struct hinic_rxq {
 
 u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);
 
-void hinic_free_all_rx_skbs(struct hinic_rxq *rxq);
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq);
 
 void hinic_rx_alloc_pkts(struct hinic_rxq *rxq);
 
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 0ef7add..bdbb0f4 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -20,6 +20,9 @@
 #include "hinic_pmd_tx.h"
 
 /* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN		14
+#define ETHER_LEN_WITH_VLAN		18
+#define HEADER_LEN_OFFSET		2
 #define VXLANLEN			8
 #define MAX_PLD_OFFSET			221
 #define MAX_SINGLE_SGE_SIZE		65536
@@ -34,6 +37,9 @@
 #define HINIC_TSO_PKT_MAX_SGE			127	/* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)		((num) > HINIC_TSO_PKT_MAX_SGE)
 
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET	1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET	0
+
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)	(SIZE_8BYTES(((u32)nr_descs) << 4))
 
@@ -476,16 +482,16 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
 hinic_set_l4_csum_info(struct hinic_sq_task *task,
 		u32 *queue_info, struct hinic_tx_offload_info *poff_info)
 {
-	u32 tcp_udp_cs, sctp;
+	u32 tcp_udp_cs, sctp = 0;
 	u16 l2hdr_len;
 
-	sctp = 0;
 	if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
 		sctp = 1;
 
 	tcp_udp_cs = poff_info->inner_l4_tcp_udp;
 
-	if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+	if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+	    poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
 		l2hdr_len = poff_info->outer_l2_len;
 
 		task->pkt_info2 |=
@@ -665,50 +671,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
 }
 
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
-	uint64_t ol_flags = m->ol_flags;
-	uint64_t inner_l3_offset = m->l2_len;
-
-	/* just support vxlan offload */
-	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
-		return -ENOTSUP;
-
-	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
-	/* Headers are fragmented */
-	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-		return -ENOTSUP;
-
-	/* IP checksum can be counted only for IPv4 packet */
-	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
-		return -EINVAL;
-
-	/* IP type not set when required */
-	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
-		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
-			return -EINVAL;
-	}
-
-	/* Check requirements for TSO packet */
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		if (m->tso_segsz == 0 ||
-			((ol_flags & PKT_TX_IPV4) &&
-			!(ol_flags & PKT_TX_IP_CKSUM)))
-			return -EINVAL;
-	}
-
-	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
-	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
-		!(ol_flags & PKT_TX_OUTER_IPV4))
-		return -EINVAL;
-
-	return 0;
-}
-
 static inline uint16_t
 hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
 {
@@ -760,6 +722,65 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return __rte_raw_cksum_reduce(sum);
 }
 
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+		     int outer_cs_flag)
+{
+	uint64_t ol_flags = m->ol_flags;
+
+	if (outer_cs_flag == 1) {
+		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+			off_info->payload_offset = m->outer_l2_len +
+				m->outer_l3_len + m->l2_len + m->l3_len;
+		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+				(ol_flags & PKT_TX_TCP_SEG)) {
+			off_info->payload_offset = m->outer_l2_len +
+					m->outer_l3_len + m->l2_len +
+					m->l3_len + m->l4_len;
+		}
+	} else {
+		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+			off_info->payload_offset = m->l2_len + m->l3_len;
+		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+				(ol_flags & PKT_TX_TCP_SEG)) {
+			off_info->payload_offset = m->l2_len + m->l3_len +
+						   m->l4_len;
+		}
+	}
+}
+
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+		      struct hinic_tx_offload_info *off_info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+	struct rte_ipv4_hdr *ip4h;
+	u16 pkt_type;
+	u8 *hdr;
+
+	hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*);
+	eth_hdr = (struct rte_ether_hdr *)hdr;
+	pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+		off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+		vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1);
+		pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	} else {
+		off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+	}
+
+	if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+		ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+		off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+					HEADER_LEN_OFFSET;
+	} else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+		/* not support ipv6 extension header */
+		off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+	}
+}
+
 static inline int
 hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
 			     struct hinic_tx_offload_info *off_info)
@@ -771,42 +792,66 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_vlan_hdr *vlan_hdr;
 	u16 eth_type = 0;
-	uint64_t inner_l3_offset = m->l2_len;
+	uint64_t inner_l3_offset;
 	uint64_t ol_flags = m->ol_flags;
 
-	/* Does packet set any of available offloads */
+	/* Check if the packets set available offload flags */
 	if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
 		return 0;
 
-	if (unlikely(hinic_validate_tx_offload(m)))
+	/* Support only vxlan offload */
+	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+		return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (rte_validate_tx_offload(m) != 0)
 		return -EINVAL;
+#endif
 
-	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-	    (ol_flags & PKT_TX_OUTER_IPV6) ||
-	    (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
-		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-		off_info->outer_l2_len = m->outer_l2_len;
-		off_info->outer_l3_len = m->outer_l3_len;
-		/* just support vxlan tunneling pkt */
-		off_info->inner_l2_len = m->l2_len - VXLANLEN -
-					 sizeof(struct rte_udp_hdr);
-		off_info->inner_l3_len = m->l3_len;
-		off_info->inner_l4_len = m->l4_len;
-		off_info->tunnel_length = m->l2_len;
-		off_info->payload_offset = m->outer_l2_len +
-				m->outer_l3_len + m->l2_len + m->l3_len;
-		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+	if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+		if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+		    (ol_flags & PKT_TX_OUTER_IPV6)) {
+			inner_l3_offset = m->l2_len + m->outer_l2_len +
+					  m->outer_l3_len;
+			off_info->outer_l2_len = m->outer_l2_len;
+			off_info->outer_l3_len = m->outer_l3_len;
+			/* just support vxlan tunneling pkt */
+			off_info->inner_l2_len = m->l2_len - VXLANLEN -
+						 sizeof(*udp_hdr);
+			off_info->inner_l3_len = m->l3_len;
+			off_info->inner_l4_len = m->l4_len;
+			off_info->tunnel_length = m->l2_len;
+			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+			hinic_get_pld_offset(m, off_info,
+					     HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+		} else {
+			inner_l3_offset = m->l2_len;
+			hinic_analyze_tx_info(m, off_info);
+			/* just support vxlan tunneling pkt */
+			off_info->inner_l2_len = m->l2_len - VXLANLEN -
+				sizeof(*udp_hdr) - off_info->outer_l2_len -
+				off_info->outer_l3_len;
+			off_info->inner_l3_len = m->l3_len;
+			off_info->inner_l4_len = m->l4_len;
+			off_info->tunnel_length = m->l2_len -
+				off_info->outer_l2_len - off_info->outer_l3_len;
+			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+			hinic_get_pld_offset(m, off_info,
+					     HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+		}
 	} else {
+		inner_l3_offset = m->l2_len;
 		off_info->inner_l2_len = m->l2_len;
 		off_info->inner_l3_len = m->l3_len;
 		off_info->inner_l4_len = m->l4_len;
 		off_info->tunnel_type = NOT_TUNNEL;
-		off_info->payload_offset = m->l2_len + m->l3_len;
-	}
 
-	if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
-	    ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
-		off_info->payload_offset += m->l4_len;
+		hinic_get_pld_offset(m, off_info,
+				     HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+	}
 
 	/* invalid udp or tcp header */
 	if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
@@ -816,7 +861,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
 			(ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
 			(ol_flags & PKT_TX_OUTER_IPV6))) {
-		off_info->tunnel_type = TUNNEL_UDP_CSUM;
 
 		/* inner_l4_tcp_udp csum should be setted to calculate outter
 		 * udp checksum when vxlan packets without inner l3 and l4
@@ -840,8 +884,7 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
 							 m->outer_l3_len);
-			udp_hdr->dgram_cksum =
-				hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+			udp_hdr->dgram_cksum = 0;
 		} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
 			off_info->outer_l3_type = IPV6_PKT;
 			ipv6_hdr =
@@ -852,9 +895,12 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 				rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
 					(m->outer_l2_len + m->outer_l3_len));
-			udp_hdr->dgram_cksum =
-				hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+			udp_hdr->dgram_cksum = 0;
 		}
+	} else if (ol_flags & PKT_TX_OUTER_IPV4) {
+		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+		off_info->inner_l4_tcp_udp = 1;
+		off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
 	}
 
 	if (ol_flags & PKT_TX_IPV4)
@@ -892,7 +938,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 
 		off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
 		off_info->inner_l4_tcp_udp = 1;
-		off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
 	} else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
 			(ol_flags & PKT_TX_TCP_SEG)) {
 		if (ol_flags & PKT_TX_IPV4) {
@@ -1105,7 +1150,7 @@ u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)
 	return nb_tx;
 }
 
-void hinic_free_all_tx_skbs(struct hinic_txq *txq)
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq)
 {
 	u16 ci;
 	struct hinic_nic_dev *nic_dev = txq->nic_dev;
@@ -1145,7 +1190,7 @@ void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev)
 			continue;
 
 		/* stop tx queue free tx mbuf */
-		hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+		hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
 		hinic_free_tx_resources(nic_dev->txqs[q_id]);
 
 		/* free txq */
@@ -1162,7 +1207,7 @@ void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev)
 
 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
 		/* stop tx queue free tx mbuf */
-		hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+		hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
 }
 
 int hinic_setup_tx_resources(struct hinic_txq *txq)
diff --git a/drivers/net/hinic/hinic_pmd_tx.h b/drivers/net/hinic/hinic_pmd_tx.h
index 8a3df27..a1ca580 100644
--- a/drivers/net/hinic/hinic_pmd_tx.h
+++ b/drivers/net/hinic/hinic_pmd_tx.h
@@ -131,7 +131,7 @@ struct hinic_txq {
 
 u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts);
 
-void hinic_free_all_tx_skbs(struct hinic_txq *txq);
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq);
 
 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
-- 
1.8.3.1
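
Usage note (not part of the patch): with DEV_RX_OFFLOAD_CHECKSUM
enabled, hinic_rx_csum() reports results through ol_flags, and returns
PKT_RX_IP_CKSUM_UNKNOWN when the offload is disabled. A minimal
consumer sketch, assuming the 19.11-era flag names:

#include <rte_mbuf.h>

static int rx_csum_ok(const struct rte_mbuf *m)
{
	uint64_t f = m->ol_flags;

	/* hardware reported a bad inner L3 or L4 checksum */
	if ((f & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD ||
	    (f & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
		return 0;	/* drop, or recompute in software */

	/* _UNKNOWN means the offload was off; verify in software */
	return 1;
}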