From: Igor Ryzhov <iryzhov@nfware.com>
To: dev@dpdk.org
Date: Thu, 2 Aug 2018 17:25:22 +0300
Message-Id: <20180802142522.57900-1-iryzhov@nfware.com>
Subject: [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI

A long time ago, preallocation of memory for KNI was introduced in
commit 0c6bc8e, because memzones could not be freed back then and
repeated allocation would have exhausted them. Memzones can now be
freed, so this patch uses that ability to allocate the memory for each
KNI dynamically.
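
For illustration, a minimal caller-side sketch of the resulting
behavior (not part of the patch: the mempool "mp", the interface name,
and the loop count are made up for the example). rte_kni_init() now
reports failure through its return value, and rte_kni_release() frees
the memzones reserved by the matching rte_kni_alloc(), so repeated
allocate/release cycles no longer exhaust memzones:

#include <stdio.h>
#include <string.h>
#include <rte_kni.h>

/* Caller-side sketch only; "mp" and the loop count are illustrative. */
static int
kni_alloc_release_cycle(struct rte_mempool *mp)
{
	struct rte_kni_conf conf;
	struct rte_kni *kni;
	int i;

	/* max_kni_ifaces is now unused; interfaces are created on demand */
	if (rte_kni_init(0) < 0)
		return -1;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth0");
	conf.mbuf_size = 2048;

	/* With the old fixed slot pool this loop needed max_kni_ifaces
	 * preallocated slots; now each release returns its memzones. */
	for (i = 0; i < 64; i++) {
		kni = rte_kni_alloc(mp, &conf, NULL);
		if (kni == NULL)
			return -1;
		if (rte_kni_release(kni) < 0)
			return -1;
	}

	return 0;
}

Since each interface's memzones are keyed by its name ("kni_info_%s"
and friends), rte_kni_get() reduces to an rte_memzone_lookup(), and
reserving a second interface with an existing name now fails at
memzone reservation instead of relying on the old in_use flag.
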
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
---
 lib/librte_kni/rte_kni.c | 392 ++++++++++++---------------------------
 lib/librte_kni/rte_kni.h |   6 +-
 test/test/test_kni.c     |   6 -
 3 files changed, 128 insertions(+), 276 deletions(-)

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 8a8f6c1cc..028b44bfd 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -36,24 +36,33 @@
  * KNI context
  */
 struct rte_kni {
+	const struct rte_memzone *mz;       /**< KNI context memzone */
 	char name[RTE_KNI_NAMESIZE];        /**< KNI interface name */
 	uint16_t group_id;                  /**< Group ID of KNI devices */
 	uint32_t slot_id;                   /**< KNI pool slot ID */
 	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
 	unsigned mbuf_size;                 /**< mbuf size */
 
+	const struct rte_memzone *m_tx_q;   /**< TX queue memzone */
+	const struct rte_memzone *m_rx_q;   /**< RX queue memzone */
+	const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
+	const struct rte_memzone *m_free_q; /**< Free queue memzone */
+
 	struct rte_kni_fifo *tx_q;          /**< TX queue */
 	struct rte_kni_fifo *rx_q;          /**< RX queue */
 	struct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */
 	struct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */
 
+	const struct rte_memzone *m_req_q;  /**< Request queue memzone */
+	const struct rte_memzone *m_resp_q; /**< Response queue memzone */
+	const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */
+
 	/* For request & response */
 	struct rte_kni_fifo *req_q;         /**< Request queue */
 	struct rte_kni_fifo *resp_q;        /**< Response queue */
 	void * sync_addr;                   /**< Req/Resp Mem address */
 
 	struct rte_kni_ops ops;             /**< operations for request */
-	uint8_t in_use : 1;                 /**< kni in use */
 };
 
 enum kni_ops_status {
@@ -61,232 +70,110 @@ enum kni_ops_status {
 	KNI_REQ_REGISTERED,
 };
 
-/**
- * KNI memzone pool slot
- */
-struct rte_kni_memzone_slot {
-	uint32_t id;
-	uint8_t in_use : 1;                    /**< slot in use */
-
-	/* Memzones */
-	const struct rte_memzone *m_ctx;       /**< KNI ctx */
-	const struct rte_memzone *m_tx_q;      /**< TX queue */
-	const struct rte_memzone *m_rx_q;      /**< RX queue */
-	const struct rte_memzone *m_alloc_q;   /**< Allocated mbufs queue */
-	const struct rte_memzone *m_free_q;    /**< To be freed mbufs queue */
-	const struct rte_memzone *m_req_q;     /**< Request queue */
-	const struct rte_memzone *m_resp_q;    /**< Response queue */
-	const struct rte_memzone *m_sync_addr;
-
-	/* Free linked list */
-	struct rte_kni_memzone_slot *next;     /**< Next slot link.list */
-};
-
-/**
- * KNI memzone pool
- */
-struct rte_kni_memzone_pool {
-	uint8_t initialized : 1;            /**< Global KNI pool init flag */
-
-	uint32_t max_ifaces;                /**< Max. num of KNI ifaces */
-	struct rte_kni_memzone_slot *slots; /**< Pool slots */
-	rte_spinlock_t mutex;               /**< alloc/release mutex */
-
-	/* Free memzone slots linked-list */
-	struct rte_kni_memzone_slot *free;      /**< First empty slot */
-	struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
-};
-
-
 static void kni_free_mbufs(struct rte_kni *kni);
 static void kni_allocate_mbufs(struct rte_kni *kni);
 
 static volatile int kni_fd = -1;
-static struct rte_kni_memzone_pool kni_memzone_pool = {
-	.initialized = 0,
-};
 
-static const struct rte_memzone *
-kni_memzone_reserve(const char *name, size_t len, int socket_id,
-		unsigned flags)
+/* Shall be called before any allocation happens */
+int
+rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
 {
-	const struct rte_memzone *mz = rte_memzone_lookup(name);
+	/* Check FD and open */
+	if (kni_fd < 0) {
+		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
+		if (kni_fd < 0) {
+			RTE_LOG(ERR, KNI,
+				"Can not open /dev/%s\n", KNI_DEVICE);
+			return -1;
+		}
+	}
 
-	if (mz == NULL)
-		mz = rte_memzone_reserve(name, len, socket_id, flags);
+	return 0;
+}
 
-	return mz;
+static void
+kni_ctx_release_mz(struct rte_kni *ctx)
+{
+	rte_memzone_free(ctx->m_tx_q);
+	rte_memzone_free(ctx->m_rx_q);
+	rte_memzone_free(ctx->m_alloc_q);
+	rte_memzone_free(ctx->m_free_q);
+	rte_memzone_free(ctx->m_req_q);
+	rte_memzone_free(ctx->m_resp_q);
+	rte_memzone_free(ctx->m_sync_addr);
 }
 
-/* Pool mgmt */
-static struct rte_kni_memzone_slot*
-kni_memzone_pool_alloc(void)
+static int
+kni_ctx_reserve_mz(struct rte_kni *ctx)
 {
-	struct rte_kni_memzone_slot *slot;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
 
-	rte_spinlock_lock(&kni_memzone_pool.mutex);
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_tx_%s", ctx->name);
+	ctx->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_tx_q == NULL);
 
-	if (!kni_memzone_pool.free) {
-		rte_spinlock_unlock(&kni_memzone_pool.mutex);
-		return NULL;
-	}
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_rx_%s", ctx->name);
+	ctx->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_rx_q == NULL);
 
-	slot = kni_memzone_pool.free;
-	kni_memzone_pool.free = slot->next;
-	slot->in_use = 1;
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_alloc_%s", ctx->name);
+	ctx->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_alloc_q == NULL);
 
-	if (!kni_memzone_pool.free)
-		kni_memzone_pool.free_tail = NULL;
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_free_%s", ctx->name);
+	ctx->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_free_q == NULL);
 
-	rte_spinlock_unlock(&kni_memzone_pool.mutex);
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_req_%s", ctx->name);
+	ctx->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_req_q == NULL);
 
-	return slot;
-}
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_resp_%s", ctx->name);
+	ctx->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_resp_q == NULL);
 
-static void
-kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
-{
-	rte_spinlock_lock(&kni_memzone_pool.mutex);
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_sync_%s", ctx->name);
+	ctx->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+	KNI_MEM_CHECK(ctx->m_sync_addr == NULL);
 
-	if (kni_memzone_pool.free)
-		kni_memzone_pool.free_tail->next = slot;
-	else
-		kni_memzone_pool.free = slot;
+	return 0;
 
-	kni_memzone_pool.free_tail = slot;
-	slot->next = NULL;
-	slot->in_use = 0;
+kni_fail:
+	kni_ctx_release_mz(ctx);
 
-	rte_spinlock_unlock(&kni_memzone_pool.mutex);
+	return -1;
 }
 
+static void
+kni_ctx_release(struct rte_kni *ctx)
+{
+	rte_memzone_free(ctx->mz);
+}
 
-/* Shall be called before any allocation happens */
-void
-rte_kni_init(unsigned int max_kni_ifaces)
+static struct rte_kni *
+kni_ctx_reserve(const char *name)
 {
-	uint32_t i;
-	struct rte_kni_memzone_slot *it;
+	struct rte_kni *ctx;
 	const struct rte_memzone *mz;
-#define OBJNAMSIZ 32
-	char obj_name[OBJNAMSIZ];
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 
-	/* Immediately return if KNI is already initialized */
-	if (kni_memzone_pool.initialized) {
-		RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
-		return;
-	}
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
 
-	if (max_kni_ifaces == 0) {
-		RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
-							max_kni_ifaces);
-		RTE_LOG(ERR, KNI, "Unable to initialize KNI\n");
-		return;
-	}
+	mz = rte_memzone_reserve(mz_name, sizeof(struct rte_kni), SOCKET_ID_ANY, 0);
+	if (mz == NULL)
+		return NULL;
 
-	/* Check FD and open */
-	if (kni_fd < 0) {
-		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
-		if (kni_fd < 0) {
-			RTE_LOG(ERR, KNI,
-				"Can not open /dev/%s\n", KNI_DEVICE);
-			return;
-		}
-	}
+	ctx = mz->addr;
 
-	/* Allocate slot objects */
-	kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
-					rte_malloc(NULL,
-					sizeof(struct rte_kni_memzone_slot) *
-					max_kni_ifaces,
-					0);
-	KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
-
-	/* Initialize general pool variables */
-	kni_memzone_pool.initialized = 1;
-	kni_memzone_pool.max_ifaces = max_kni_ifaces;
-	kni_memzone_pool.free = &kni_memzone_pool.slots[0];
-	rte_spinlock_init(&kni_memzone_pool.mutex);
-
-	/* Pre-allocate all memzones of all the slots; panic on error */
-	for (i = 0; i < max_kni_ifaces; i++) {
-
-		/* Recover current slot */
-		it = &kni_memzone_pool.slots[i];
-		it->id = i;
-
-		/* Allocate KNI context */
-		snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
-		mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_ctx = mz;
-
-		/* TX RING */
-		snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_tx_q = mz;
-
-		/* RX RING */
-		snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_rx_q = mz;
-
-		/* ALLOC RING */
-		snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_alloc_q = mz;
-
-		/* FREE RING */
-		snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_free_q = mz;
-
-		/* Request RING */
-		snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_req_q = mz;
-
-		/* Response RING */
-		snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_resp_q = mz;
-
-		/* Req/Resp sync mem area */
-		snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
-		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
-					SOCKET_ID_ANY, 0);
-		KNI_MEM_CHECK(mz == NULL);
-		it->m_sync_addr = mz;
-
-		if ((i+1) == max_kni_ifaces) {
-			it->next = NULL;
-			kni_memzone_pool.free_tail = it;
-		} else
-			it->next = &kni_memzone_pool.slots[i+1];
-	}
+	memset(ctx, 0, sizeof(struct rte_kni));
 
-	return;
+	ctx->mz = mz;
+	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", name);
 
-kni_fail:
-	RTE_LOG(ERR, KNI, "Unable to allocate memory for max_kni_ifaces:%d."
-		"Increase the amount of hugepages memory\n", max_kni_ifaces);
+	return ctx;
 }
 
-
 struct rte_kni *
 rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	      const struct rte_kni_conf *conf,
@@ -295,36 +182,20 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	int ret;
 	struct rte_kni_device_info dev_info;
 	struct rte_kni *ctx;
-	char intf_name[RTE_KNI_NAMESIZE];
-	const struct rte_memzone *mz;
-	struct rte_kni_memzone_slot *slot = NULL;
 
 	if (!pktmbuf_pool || !conf || !conf->name[0])
 		return NULL;
 
 	/* Check if KNI subsystem has been initialized */
-	if (kni_memzone_pool.initialized != 1) {
+	if (kni_fd < 0) {
 		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
 		return NULL;
 	}
 
-	/* Get an available slot from the pool */
-	slot = kni_memzone_pool_alloc();
-	if (!slot) {
-		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
-			kni_memzone_pool.max_ifaces);
+	ctx = kni_ctx_reserve(conf->name);
+	if (ctx == NULL)
 		return NULL;
-	}
-
-	/* Recover ctx */
-	ctx = slot->m_ctx->addr;
-	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
-	if (ctx->in_use) {
-		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
-		return NULL;
-	}
-	memset(ctx, 0, sizeof(struct rte_kni));
 
 	if (ops)
 		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
 	else
@@ -344,72 +215,68 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 
 	memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
 
-	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
-	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);
+	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", conf->name);
 
 	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
 		dev_info.bus, dev_info.devid, dev_info.function,
 			dev_info.vendor_id, dev_info.device_id);
+
+	ret = kni_ctx_reserve_mz(ctx);
+	if (ret < 0)
+		goto mz_fail;
+
 	/* TX RING */
-	mz = slot->m_tx_q;
-	ctx->tx_q = mz->addr;
+	ctx->tx_q = ctx->m_tx_q->addr;
 	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
-	dev_info.tx_phys = mz->phys_addr;
+	dev_info.tx_phys = ctx->m_tx_q->phys_addr;
 
 	/* RX RING */
-	mz = slot->m_rx_q;
-	ctx->rx_q = mz->addr;
+	ctx->rx_q = ctx->m_rx_q->addr;
 	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
-	dev_info.rx_phys = mz->phys_addr;
+	dev_info.rx_phys = ctx->m_rx_q->phys_addr;
 
 	/* ALLOC RING */
-	mz = slot->m_alloc_q;
-	ctx->alloc_q = mz->addr;
+	ctx->alloc_q = ctx->m_alloc_q->addr;
 	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
-	dev_info.alloc_phys = mz->phys_addr;
+	dev_info.alloc_phys = ctx->m_alloc_q->phys_addr;
 
 	/* FREE RING */
-	mz = slot->m_free_q;
-	ctx->free_q = mz->addr;
+	ctx->free_q = ctx->m_free_q->addr;
 	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
-	dev_info.free_phys = mz->phys_addr;
+	dev_info.free_phys = ctx->m_free_q->phys_addr;
 
 	/* Request RING */
-	mz = slot->m_req_q;
-	ctx->req_q = mz->addr;
+	ctx->req_q = ctx->m_req_q->addr;
 	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
-	dev_info.req_phys = mz->phys_addr;
+	dev_info.req_phys = ctx->m_req_q->phys_addr;
 
 	/* Response RING */
-	mz = slot->m_resp_q;
-	ctx->resp_q = mz->addr;
+	ctx->resp_q = ctx->m_resp_q->addr;
 	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
-	dev_info.resp_phys = mz->phys_addr;
+	dev_info.resp_phys = ctx->m_resp_q->phys_addr;
 
 	/* Req/Resp sync mem area */
-	mz = slot->m_sync_addr;
-	ctx->sync_addr = mz->addr;
-	dev_info.sync_va = mz->addr;
-	dev_info.sync_phys = mz->phys_addr;
+	ctx->sync_addr = ctx->m_sync_addr->addr;
+	dev_info.sync_va = ctx->m_sync_addr->addr;
+	dev_info.sync_phys = ctx->m_sync_addr->phys_addr;
 
 	ctx->pktmbuf_pool = pktmbuf_pool;
 	ctx->group_id = conf->group_id;
-	ctx->slot_id = slot->id;
 	ctx->mbuf_size = conf->mbuf_size;
 
 	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
-	KNI_MEM_CHECK(ret < 0);
-
-	ctx->in_use = 1;
+	if (ret < 0)
+		goto ioctl_fail;
 
 	/* Allocate mbufs and then put them into alloc_q */
 	kni_allocate_mbufs(ctx);
 
 	return ctx;
 
-kni_fail:
-	if (slot)
-		kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);
+ioctl_fail:
+	kni_ctx_release_mz(ctx);
+mz_fail:
+	kni_ctx_release(ctx);
 
 	return NULL;
 }
@@ -463,10 +330,9 @@ int
 rte_kni_release(struct rte_kni *kni)
 {
 	struct rte_kni_device_info dev_info;
-	uint32_t slot_id;
 	uint32_t retry = 5;
 
-	if (!kni || !kni->in_use)
+	if (!kni)
 		return -1;
 
 	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
@@ -488,18 +354,9 @@ rte_kni_release(struct rte_kni *kni)
 	kni_free_fifo(kni->tx_q);
 	kni_free_fifo(kni->free_q);
 
-	slot_id = kni->slot_id;
+	kni_ctx_release_mz(kni);
 
-	/* Memset the KNI struct */
-	memset(kni, 0, sizeof(struct rte_kni));
-
-	/* Release memzone */
-	if (slot_id > kni_memzone_pool.max_ifaces) {
-		RTE_LOG(ERR, KNI, "KNI pool: corrupted slot ID: %d, max: %d\n",
-			slot_id, kni_memzone_pool.max_ifaces);
-		return -1;
-	}
-	kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
+	kni_ctx_release(kni);
 
 	return 0;
 }
@@ -711,21 +568,18 @@ kni_allocate_mbufs(struct rte_kni *kni)
 struct rte_kni *
 rte_kni_get(const char *name)
 {
-	uint32_t i;
-	struct rte_kni_memzone_slot *it;
-	struct rte_kni *kni;
-
-	/* Note: could be improved perf-wise if necessary */
-	for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
-		it = &kni_memzone_pool.slots[i];
-		if (it->in_use == 0)
-			continue;
-		kni = it->m_ctx->addr;
-		if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
-			return kni;
-	}
+	const struct rte_memzone *mz;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
 
-	return NULL;
+	if (!name || !name[0])
+		return NULL;
+
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
+	mz = rte_memzone_lookup(mz_name);
+	if (!mz)
+		return NULL;
+
+	return mz->addr;
 }
 
 const char *
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 99055e2c2..601abdfc6 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -81,8 +81,12 @@ struct rte_kni_conf {
  *
  * @param max_kni_ifaces
  *  The maximum number of KNI interfaces that can coexist concurrently
+ *
+ * @return
+ *  - 0 indicates success.
+ *  - negative value indicates failure.
  */
-void rte_kni_init(unsigned int max_kni_ifaces);
+int rte_kni_init(unsigned int max_kni_ifaces);
 
 
 /**
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index 1b876719a..56c98513a 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
 	}
 	test_kni_ctx = NULL;
 
-	/* test of releasing a released kni device */
-	if (rte_kni_release(kni) == 0) {
-		printf("should not release a released kni device\n");
-		return -1;
-	}
-
 	/* test of reusing memzone */
 	kni = rte_kni_alloc(mp, &conf, &ops);
 	if (!kni) {
-- 
2.18.0