* [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI
@ 2018-08-02 14:25 Igor Ryzhov
2018-08-27 17:06 ` Ferruh Yigit
2018-09-23 19:12 ` [dpdk-dev] [PATCH v2] " Igor Ryzhov
0 siblings, 2 replies; 13+ messages in thread
From: Igor Ryzhov @ 2018-08-02 14:25 UTC (permalink / raw)
To: dev
A long time ago, preallocation of memory for KNI was introduced in commit
0c6bc8e. It was done because of the lack of an ability to free previously
allocated memzones, which led to memzone exhaustion. Memzones can now be
freed, and this patch uses that ability for dynamic KNI memory allocation.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
---
lib/librte_kni/rte_kni.c | 392 ++++++++++++---------------------------
lib/librte_kni/rte_kni.h | 6 +-
test/test/test_kni.c | 6 -
3 files changed, 128 insertions(+), 276 deletions(-)
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 8a8f6c1cc..028b44bfd 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -36,24 +36,33 @@
* KNI context
*/
struct rte_kni {
+ const struct rte_memzone *mz; /**< KNI context memzone */
char name[RTE_KNI_NAMESIZE]; /**< KNI interface name */
uint16_t group_id; /**< Group ID of KNI devices */
uint32_t slot_id; /**< KNI pool slot ID */
struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
unsigned mbuf_size; /**< mbuf size */
+ const struct rte_memzone *m_tx_q; /**< TX queue memzone */
+ const struct rte_memzone *m_rx_q; /**< RX queue memzone */
+ const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
+ const struct rte_memzone *m_free_q; /**< Free queue memzone */
+
struct rte_kni_fifo *tx_q; /**< TX queue */
struct rte_kni_fifo *rx_q; /**< RX queue */
struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */
struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */
+ const struct rte_memzone *m_req_q; /**< Request queue memzone */
+ const struct rte_memzone *m_resp_q; /**< Response queue memzone */
+ const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */
+
/* For request & response */
struct rte_kni_fifo *req_q; /**< Request queue */
struct rte_kni_fifo *resp_q; /**< Response queue */
void * sync_addr; /**< Req/Resp Mem address */
struct rte_kni_ops ops; /**< operations for request */
- uint8_t in_use : 1; /**< kni in use */
};
enum kni_ops_status {
@@ -61,232 +70,110 @@ enum kni_ops_status {
KNI_REQ_REGISTERED,
};
-/**
- * KNI memzone pool slot
- */
-struct rte_kni_memzone_slot {
- uint32_t id;
- uint8_t in_use : 1; /**< slot in use */
-
- /* Memzones */
- const struct rte_memzone *m_ctx; /**< KNI ctx */
- const struct rte_memzone *m_tx_q; /**< TX queue */
- const struct rte_memzone *m_rx_q; /**< RX queue */
- const struct rte_memzone *m_alloc_q; /**< Allocated mbufs queue */
- const struct rte_memzone *m_free_q; /**< To be freed mbufs queue */
- const struct rte_memzone *m_req_q; /**< Request queue */
- const struct rte_memzone *m_resp_q; /**< Response queue */
- const struct rte_memzone *m_sync_addr;
-
- /* Free linked list */
- struct rte_kni_memzone_slot *next; /**< Next slot link.list */
-};
-
-/**
- * KNI memzone pool
- */
-struct rte_kni_memzone_pool {
- uint8_t initialized : 1; /**< Global KNI pool init flag */
-
- uint32_t max_ifaces; /**< Max. num of KNI ifaces */
- struct rte_kni_memzone_slot *slots; /**< Pool slots */
- rte_spinlock_t mutex; /**< alloc/release mutex */
-
- /* Free memzone slots linked-list */
- struct rte_kni_memzone_slot *free; /**< First empty slot */
- struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
-};
-
-
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);
static volatile int kni_fd = -1;
-static struct rte_kni_memzone_pool kni_memzone_pool = {
- .initialized = 0,
-};
-static const struct rte_memzone *
-kni_memzone_reserve(const char *name, size_t len, int socket_id,
- unsigned flags)
+/* Shall be called before any allocation happens */
+int
+rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
- const struct rte_memzone *mz = rte_memzone_lookup(name);
+ /* Check FD and open */
+ if (kni_fd < 0) {
+ kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
+ if (kni_fd < 0) {
+ RTE_LOG(ERR, KNI,
+ "Can not open /dev/%s\n", KNI_DEVICE);
+ return -1;
+ }
+ }
- if (mz == NULL)
- mz = rte_memzone_reserve(name, len, socket_id, flags);
+ return 0;
+}
- return mz;
+static void
+kni_ctx_release_mz(struct rte_kni *ctx)
+{
+ rte_memzone_free(ctx->m_tx_q);
+ rte_memzone_free(ctx->m_rx_q);
+ rte_memzone_free(ctx->m_alloc_q);
+ rte_memzone_free(ctx->m_free_q);
+ rte_memzone_free(ctx->m_req_q);
+ rte_memzone_free(ctx->m_resp_q);
+ rte_memzone_free(ctx->m_sync_addr);
}
-/* Pool mgmt */
-static struct rte_kni_memzone_slot*
-kni_memzone_pool_alloc(void)
+static int
+kni_ctx_reserve_mz(struct rte_kni *ctx)
{
- struct rte_kni_memzone_slot *slot;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
- rte_spinlock_lock(&kni_memzone_pool.mutex);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_tx_%s", ctx->name);
+ ctx->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_tx_q == NULL);
- if (!kni_memzone_pool.free) {
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
- return NULL;
- }
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_rx_%s", ctx->name);
+ ctx->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_rx_q == NULL);
- slot = kni_memzone_pool.free;
- kni_memzone_pool.free = slot->next;
- slot->in_use = 1;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_alloc_%s", ctx->name);
+ ctx->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_alloc_q == NULL);
- if (!kni_memzone_pool.free)
- kni_memzone_pool.free_tail = NULL;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_free_%s", ctx->name);
+ ctx->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_free_q == NULL);
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_req_%s", ctx->name);
+ ctx->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_req_q == NULL);
- return slot;
-}
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_resp_%s", ctx->name);
+ ctx->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_resp_q == NULL);
-static void
-kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
-{
- rte_spinlock_lock(&kni_memzone_pool.mutex);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_sync_%s", ctx->name);
+ ctx->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(ctx->m_sync_addr == NULL);
- if (kni_memzone_pool.free)
- kni_memzone_pool.free_tail->next = slot;
- else
- kni_memzone_pool.free = slot;
+ return 0;
- kni_memzone_pool.free_tail = slot;
- slot->next = NULL;
- slot->in_use = 0;
+kni_fail:
+ kni_ctx_release_mz(ctx);
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
+ return -1;
}
+static void
+kni_ctx_release(struct rte_kni *ctx)
+{
+ rte_memzone_free(ctx->mz);
+}
-/* Shall be called before any allocation happens */
-void
-rte_kni_init(unsigned int max_kni_ifaces)
+static struct rte_kni *
+kni_ctx_reserve(const char *name)
{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
+ struct rte_kni *ctx;
const struct rte_memzone *mz;
-#define OBJNAMSIZ 32
- char obj_name[OBJNAMSIZ];
char mz_name[RTE_MEMZONE_NAMESIZE];
- /* Immediately return if KNI is already initialized */
- if (kni_memzone_pool.initialized) {
- RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
- return;
- }
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
- if (max_kni_ifaces == 0) {
- RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
- max_kni_ifaces);
- RTE_LOG(ERR, KNI, "Unable to initialize KNI\n");
- return;
- }
+ mz = rte_memzone_reserve(mz_name, sizeof(struct rte_kni), SOCKET_ID_ANY, 0);
+ if (mz == NULL)
+ return NULL;
- /* Check FD and open */
- if (kni_fd < 0) {
- kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
- if (kni_fd < 0) {
- RTE_LOG(ERR, KNI,
- "Can not open /dev/%s\n", KNI_DEVICE);
- return;
- }
- }
+ ctx = mz->addr;
- /* Allocate slot objects */
- kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
- rte_malloc(NULL,
- sizeof(struct rte_kni_memzone_slot) *
- max_kni_ifaces,
- 0);
- KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
-
- /* Initialize general pool variables */
- kni_memzone_pool.initialized = 1;
- kni_memzone_pool.max_ifaces = max_kni_ifaces;
- kni_memzone_pool.free = &kni_memzone_pool.slots[0];
- rte_spinlock_init(&kni_memzone_pool.mutex);
-
- /* Pre-allocate all memzones of all the slots; panic on error */
- for (i = 0; i < max_kni_ifaces; i++) {
-
- /* Recover current slot */
- it = &kni_memzone_pool.slots[i];
- it->id = i;
-
- /* Allocate KNI context */
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
- mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_ctx = mz;
-
- /* TX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_tx_q = mz;
-
- /* RX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_rx_q = mz;
-
- /* ALLOC RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_alloc_q = mz;
-
- /* FREE RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_free_q = mz;
-
- /* Request RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_req_q = mz;
-
- /* Response RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_resp_q = mz;
-
- /* Req/Resp sync mem area */
- snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_sync_addr = mz;
-
- if ((i+1) == max_kni_ifaces) {
- it->next = NULL;
- kni_memzone_pool.free_tail = it;
- } else
- it->next = &kni_memzone_pool.slots[i+1];
- }
+ memset(ctx, 0, sizeof(struct rte_kni));
- return;
+ ctx->mz = mz;
+ snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", name);
-kni_fail:
- RTE_LOG(ERR, KNI, "Unable to allocate memory for max_kni_ifaces:%d."
- "Increase the amount of hugepages memory\n", max_kni_ifaces);
+ return ctx;
}
-
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
const struct rte_kni_conf *conf,
@@ -295,36 +182,20 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
int ret;
struct rte_kni_device_info dev_info;
struct rte_kni *ctx;
- char intf_name[RTE_KNI_NAMESIZE];
- const struct rte_memzone *mz;
- struct rte_kni_memzone_slot *slot = NULL;
if (!pktmbuf_pool || !conf || !conf->name[0])
return NULL;
/* Check if KNI subsystem has been initialized */
- if (kni_memzone_pool.initialized != 1) {
+ if (kni_fd < 0) {
RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
return NULL;
}
- /* Get an available slot from the pool */
- slot = kni_memzone_pool_alloc();
- if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
- kni_memzone_pool.max_ifaces);
+ ctx = kni_ctx_reserve(conf->name);
+ if (ctx == NULL)
return NULL;
- }
-
- /* Recover ctx */
- ctx = slot->m_ctx->addr;
- snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
- if (ctx->in_use) {
- RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
- return NULL;
- }
- memset(ctx, 0, sizeof(struct rte_kni));
if (ops)
memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
else
@@ -344,72 +215,68 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
- snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
- snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);
+ snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", conf->name);
RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
dev_info.bus, dev_info.devid, dev_info.function,
dev_info.vendor_id, dev_info.device_id);
+
+ ret = kni_ctx_reserve_mz(ctx);
+ if (ret < 0)
+ goto mz_fail;
+
/* TX RING */
- mz = slot->m_tx_q;
- ctx->tx_q = mz->addr;
+ ctx->tx_q = ctx->m_tx_q->addr;
kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
- dev_info.tx_phys = mz->phys_addr;
+ dev_info.tx_phys = ctx->m_tx_q->phys_addr;
/* RX RING */
- mz = slot->m_rx_q;
- ctx->rx_q = mz->addr;
+ ctx->rx_q = ctx->m_rx_q->addr;
kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
- dev_info.rx_phys = mz->phys_addr;
+ dev_info.rx_phys = ctx->m_rx_q->phys_addr;
/* ALLOC RING */
- mz = slot->m_alloc_q;
- ctx->alloc_q = mz->addr;
+ ctx->alloc_q = ctx->m_alloc_q->addr;
kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
- dev_info.alloc_phys = mz->phys_addr;
+ dev_info.alloc_phys = ctx->m_alloc_q->phys_addr;
/* FREE RING */
- mz = slot->m_free_q;
- ctx->free_q = mz->addr;
+ ctx->free_q = ctx->m_free_q->addr;
kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
- dev_info.free_phys = mz->phys_addr;
+ dev_info.free_phys = ctx->m_free_q->phys_addr;
/* Request RING */
- mz = slot->m_req_q;
- ctx->req_q = mz->addr;
+ ctx->req_q = ctx->m_req_q->addr;
kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
- dev_info.req_phys = mz->phys_addr;
+ dev_info.req_phys = ctx->m_req_q->phys_addr;
/* Response RING */
- mz = slot->m_resp_q;
- ctx->resp_q = mz->addr;
+ ctx->resp_q = ctx->m_resp_q->addr;
kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
- dev_info.resp_phys = mz->phys_addr;
+ dev_info.resp_phys = ctx->m_resp_q->phys_addr;
/* Req/Resp sync mem area */
- mz = slot->m_sync_addr;
- ctx->sync_addr = mz->addr;
- dev_info.sync_va = mz->addr;
- dev_info.sync_phys = mz->phys_addr;
+ ctx->sync_addr = ctx->m_sync_addr->addr;
+ dev_info.sync_va = ctx->m_sync_addr->addr;
+ dev_info.sync_phys = ctx->m_sync_addr->phys_addr;
ctx->pktmbuf_pool = pktmbuf_pool;
ctx->group_id = conf->group_id;
- ctx->slot_id = slot->id;
ctx->mbuf_size = conf->mbuf_size;
ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
- KNI_MEM_CHECK(ret < 0);
-
- ctx->in_use = 1;
+ if (ret < 0)
+ goto ioctl_fail;
/* Allocate mbufs and then put them into alloc_q */
kni_allocate_mbufs(ctx);
return ctx;
-kni_fail:
- if (slot)
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);
+ioctl_fail:
+ kni_ctx_release_mz(ctx);
+mz_fail:
+ kni_ctx_release(ctx);
return NULL;
}
@@ -463,10 +330,9 @@ int
rte_kni_release(struct rte_kni *kni)
{
struct rte_kni_device_info dev_info;
- uint32_t slot_id;
uint32_t retry = 5;
- if (!kni || !kni->in_use)
+ if (!kni)
return -1;
snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
@@ -488,18 +354,9 @@ rte_kni_release(struct rte_kni *kni)
kni_free_fifo(kni->tx_q);
kni_free_fifo(kni->free_q);
- slot_id = kni->slot_id;
+ kni_ctx_release_mz(kni);
- /* Memset the KNI struct */
- memset(kni, 0, sizeof(struct rte_kni));
-
- /* Release memzone */
- if (slot_id > kni_memzone_pool.max_ifaces) {
- RTE_LOG(ERR, KNI, "KNI pool: corrupted slot ID: %d, max: %d\n",
- slot_id, kni_memzone_pool.max_ifaces);
- return -1;
- }
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
+ kni_ctx_release(kni);
return 0;
}
@@ -711,21 +568,18 @@ kni_allocate_mbufs(struct rte_kni *kni)
struct rte_kni *
rte_kni_get(const char *name)
{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
- struct rte_kni *kni;
-
- /* Note: could be improved perf-wise if necessary */
- for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
- it = &kni_memzone_pool.slots[i];
- if (it->in_use == 0)
- continue;
- kni = it->m_ctx->addr;
- if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
- return kni;
- }
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
- return NULL;
+ if (!name || !name[0])
+ return NULL;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz)
+ return NULL;
+
+ return mz->addr;
}
const char *
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 99055e2c2..601abdfc6 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -81,8 +81,12 @@ struct rte_kni_conf {
*
* @param max_kni_ifaces
* The maximum number of KNI interfaces that can coexist concurrently
+ *
+ * @return
+ * - 0 indicates success.
+ * - negative value indicates failure.
*/
-void rte_kni_init(unsigned int max_kni_ifaces);
+int rte_kni_init(unsigned int max_kni_ifaces);
/**
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index 1b876719a..56c98513a 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
}
test_kni_ctx = NULL;
- /* test of releasing a released kni device */
- if (rte_kni_release(kni) == 0) {
- printf("should not release a released kni device\n");
- return -1;
- }
-
/* test of reusing memzone */
kni = rte_kni_alloc(mp, &conf, &ops);
if (!kni) {
--
2.18.0
* Re: [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI
2018-08-02 14:25 [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI Igor Ryzhov
@ 2018-08-27 17:06 ` Ferruh Yigit
2018-08-29 9:52 ` Igor Ryzhov
2018-09-23 19:12 ` [dpdk-dev] [PATCH v2] " Igor Ryzhov
1 sibling, 1 reply; 13+ messages in thread
From: Ferruh Yigit @ 2018-08-27 17:06 UTC (permalink / raw)
To: Igor Ryzhov, dev
On 8/2/2018 3:25 PM, Igor Ryzhov wrote:
> Long time ago preallocation of memory for KNI was introduced in commit
> 0c6bc8e. It was done because of lack of ability to free previously
> allocated memzones, which led to memzone exhaustion. Currently memzones
> can be freed and this patch uses this ability for dynamic KNI memory
> allocation.
Hi Igor,
It is good to be able to allocate memory dynamically and get rid of the
"max_kni_ifaces" and "kni_memzone_pool", thanks for the patch.
Overall looks good, a few comments below.
>
> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
> ---
> lib/librte_kni/rte_kni.c | 392 ++++++++++++---------------------------
> lib/librte_kni/rte_kni.h | 6 +-
> test/test/test_kni.c | 6 -
> 3 files changed, 128 insertions(+), 276 deletions(-)
>
> diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
> index 8a8f6c1cc..028b44bfd 100644
> --- a/lib/librte_kni/rte_kni.c
> +++ b/lib/librte_kni/rte_kni.c
> @@ -36,24 +36,33 @@
> * KNI context
> */
> struct rte_kni {
> + const struct rte_memzone *mz; /**< KNI context memzone */
I was thinking of removing the context memzone and using rte_zmalloc() to create kni
objects, but the updated rte_kni_get() API seems to rely on it.
If you see any other way to get the kni object from its name in rte_kni_get(), I am for
removing the above *mz variable from the rte_kni struct.
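To make the trade-off concrete, here is a rough, untested sketch of the two
options (for illustration only, not part of the patch; mz_name/kni/name are
declared as in rte_kni.c):

    /* Option A: named memzone per KNI context, so rte_kni_get() can
     * recover the object by name via rte_memzone_lookup() */
    snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
    mz = rte_memzone_reserve(mz_name, sizeof(struct rte_kni),
                             SOCKET_ID_ANY, 0);
    kni = mz->addr;

    /* Option B: plain rte_zmalloc(), no memzone needed, but then the
     * library needs some other registry (e.g. a tailq) so that
     * rte_kni_get() can still look the object up by name */
    kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);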
<...>
> +static void
> +kni_ctx_release_mz(struct rte_kni *ctx)
> +{
> + rte_memzone_free(ctx->m_tx_q);
> + rte_memzone_free(ctx->m_rx_q);
> + rte_memzone_free(ctx->m_alloc_q);
> + rte_memzone_free(ctx->m_free_q);
> + rte_memzone_free(ctx->m_req_q);
> + rte_memzone_free(ctx->m_resp_q);
> + rte_memzone_free(ctx->m_sync_addr);
"ctx" sounds confusing to me, isn't this "rte_kni" object instance, why not just
call it "kni" or if it is too generic "kni_obj" or similar? For other APIs as well.
And this is just a detail but about order of APIs would you mind having first
reserve() one, later release() one?
<...>
> -/* Shall be called before any allocation happens */
> -void
> -rte_kni_init(unsigned int max_kni_ifaces)
> +static struct rte_kni *
> +kni_ctx_reserve(const char *name)
> {
> - uint32_t i;
> - struct rte_kni_memzone_slot *it;
> + struct rte_kni *ctx;
> const struct rte_memzone *mz;
> -#define OBJNAMSIZ 32
> - char obj_name[OBJNAMSIZ];
> char mz_name[RTE_MEMZONE_NAMESIZE];
>
> - /* Immediately return if KNI is already initialized */
> - if (kni_memzone_pool.initialized) {
> - RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
> - return;
> - }
> + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
Can you please convert memzone names, like "kni_info" to defines, for all of them?
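For example (the exact define names are up to you, this is just a sketch):

    #define KNI_MZ_NAME_FMT      "kni_info_%s"
    #define KNI_TX_Q_MZ_NAME_FMT "kni_tx_%s"
    /* ... one per memzone ... */

    snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_MZ_NAME_FMT, name);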
<...>
> @@ -81,8 +81,12 @@ struct rte_kni_conf {
> *
> * @param max_kni_ifaces
> * The maximum number of KNI interfaces that can coexist concurrently
> + *
> + * @return
> + * - 0 indicates success.
> + * - negative value indicates failure.
> */
> -void rte_kni_init(unsigned int max_kni_ifaces);
> +int rte_kni_init(unsigned int max_kni_ifaces);
This changes the API: the return type changes from "void" to "int". I agree "int"
makes more sense since the API can fail, but it is an ABI/API change.
Since existing binaries don't check the return type at all, there may be no
issue from the ABI point of view, but from the API point of view some apps may get
"return value not checked" warnings, though I am not sure.
And the need for the API is questionable at this stage; it may be possible to move
its work into rte_kni_alloc(), which already has the "kni_fd" check.
What do you think about keeping the API signature the same for now, but adding a
deprecation notice to remove the API, and then removing rte_kni_init() completely
in the next release (v19.02)?
<...>
> /**
> diff --git a/test/test/test_kni.c b/test/test/test_kni.c
> index 1b876719a..56c98513a 100644
> --- a/test/test/test_kni.c
> +++ b/test/test/test_kni.c
> @@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
> }
> test_kni_ctx = NULL;
>
> - /* test of releasing a released kni device */
> - if (rte_kni_release(kni) == 0) {
> - printf("should not release a released kni device\n");
> - return -1;
> - }
Why does this need to be removed?
* Re: [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI
2018-08-27 17:06 ` Ferruh Yigit
@ 2018-08-29 9:52 ` Igor Ryzhov
2018-08-30 10:55 ` Ferruh Yigit
0 siblings, 1 reply; 13+ messages in thread
From: Igor Ryzhov @ 2018-08-29 9:52 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: dev
Hello Ferruh,
Thanks for the review, comments inline.
On Mon, Aug 27, 2018 at 8:06 PM, Ferruh Yigit <ferruh.yigit@intel.com>
wrote:
> On 8/2/2018 3:25 PM, Igor Ryzhov wrote:
> > Long time ago preallocation of memory for KNI was introduced in commit
> > 0c6bc8e. It was done because of lack of ability to free previously
> > allocated memzones, which led to memzone exhaustion. Currently memzones
> > can be freed and this patch uses this ability for dynamic KNI memory
> > allocation.
>
> Hi Igor,
>
> It is good to be able to allocate memory dynamically and get rid of the
> "max_kni_ifaces" and "kni_memzone_pool", thanks for the patch.
>
> Overall looks good, a few comments below.
>
> >
> > Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
> > ---
> > lib/librte_kni/rte_kni.c | 392 ++++++++++++---------------------------
> > lib/librte_kni/rte_kni.h | 6 +-
> > test/test/test_kni.c | 6 -
> > 3 files changed, 128 insertions(+), 276 deletions(-)
> >
> > diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
> > index 8a8f6c1cc..028b44bfd 100644
> > --- a/lib/librte_kni/rte_kni.c
> > +++ b/lib/librte_kni/rte_kni.c
> > @@ -36,24 +36,33 @@
> > * KNI context
> > */
> > struct rte_kni {
> > + const struct rte_memzone *mz; /**< KNI context memzone */
>
> I was thinking remove the context memzone and use rte_zmalloc() to create
> kni
> objects but updated rte_kni_get() API seems relaying this.
> If you see any other way to get kni object from name in rte_kni_get(), I
> am for
> removing above *mz variable from rte_kni struct.
>
I had exactly the same thought but didn't find a way to preserve the
rte_kni_get() API.
Maybe someone has ideas?
Or maybe this API can be marked deprecated and deleted in the future?
>
> <...>
>
> > +static void
> > +kni_ctx_release_mz(struct rte_kni *ctx)
> > +{
> > + rte_memzone_free(ctx->m_tx_q);
> > + rte_memzone_free(ctx->m_rx_q);
> > + rte_memzone_free(ctx->m_alloc_q);
> > + rte_memzone_free(ctx->m_free_q);
> > + rte_memzone_free(ctx->m_req_q);
> > + rte_memzone_free(ctx->m_resp_q);
> > + rte_memzone_free(ctx->m_sync_addr);
>
>
> "ctx" sounds confusing to me, isn't this "rte_kni" object instance, why
> not just
> call it "kni" or if it is too generic "kni_obj" or similar? For other APIs
> as well.
>
"ctx" was already used in the code, so I didn't change it.
I also think that it's better to use "kni" – will change it in v2.
>
> And this is just a detail but about order of APIs would you mind having
> first
> reserve() one, later release() one?
>
Ok.
>
> <...>
>
> > -/* Shall be called before any allocation happens */
> > -void
> > -rte_kni_init(unsigned int max_kni_ifaces)
> > +static struct rte_kni *
> > +kni_ctx_reserve(const char *name)
> > {
> > - uint32_t i;
> > - struct rte_kni_memzone_slot *it;
> > + struct rte_kni *ctx;
> > const struct rte_memzone *mz;
> > -#define OBJNAMSIZ 32
> > - char obj_name[OBJNAMSIZ];
> > char mz_name[RTE_MEMZONE_NAMESIZE];
> >
> > - /* Immediately return if KNI is already initialized */
> > - if (kni_memzone_pool.initialized) {
> > - RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
> > - return;
> > - }
> > + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
>
> Can you please convert memzone names, like "kni_info" to defines, for all
> of them?
>
Ok.
>
> <...>
>
> > @@ -81,8 +81,12 @@ struct rte_kni_conf {
> > *
> > * @param max_kni_ifaces
> > * The maximum number of KNI interfaces that can coexist concurrently
> > + *
> > + * @return
> > + * - 0 indicates success.
> > + * - negative value indicates failure.
> > */
> > -void rte_kni_init(unsigned int max_kni_ifaces);
> > +int rte_kni_init(unsigned int max_kni_ifaces);
>
> This changes the API. Return type changes from "void" to "int". I agree
> "int"
> makes more sense since API can fail, but this changes the ABI/API.
>
> Since existing binaries doesn't check the return type at all there may be
> no
> issue from ABI point of view but from API point of view some apps may get
> return
> value not checked warnings, not sure though.
>
> And the need of the API is questionable at this stage, it may be possible
> to
> move rte_kni_alloc() where it already has "kni_fd" check.
>
> What do you think keep API signature same for now, but add a deprecation
> notice
> to remove the API. Next release (v19.02) remove rte_kni_init() completely?
>
As far as I know, warnings are only emitted if the warn_unused_result
attribute is used, which is not the case here.
So I think that changing from void to int should not break anything. I can
change it back in v2 if I'm wrong.
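Just to illustrate the point (hypothetical example, not DPDK code):

    __attribute__((warn_unused_result)) int must_check(void);
    int plain(void);

    void caller(void)
    {
        must_check(); /* gcc/clang: warning about the ignored return value */
        plain();      /* no warning; same situation as rte_kni_init() */
    }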
Regarding the API removal – I think it's better to keep that function, to
have a clearer API.
As we have rte_kni_close to close the KNI device, we should have a function to
open it.
Maybe it should be renamed to rte_kni_open :)
>
> <...>
>
> > /**
> > diff --git a/test/test/test_kni.c b/test/test/test_kni.c
> > index 1b876719a..56c98513a 100644
> > --- a/test/test/test_kni.c
> > +++ b/test/test/test_kni.c
> > @@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct
> rte_mempool *mp)
> > }
> > test_kni_ctx = NULL;
> >
> > - /* test of releasing a released kni device */
> > - if (rte_kni_release(kni) == 0) {
> > - printf("should not release a released kni device\n");
> > - return -1;
> > - }
>
> Why need to remove this?
>
Previously, rte_kni_release didn't free any memory, and a second call
with the same argument just checked the "in_use" flag and returned.
After my changes, the memory is actually freed, and rte_kni_release must not be
called twice with the same argument.
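So on the application side the expected pattern is simply to release once and
drop the pointer, e.g. (sketch only):

    if (rte_kni_release(kni) < 0)
        RTE_LOG(ERR, KNI, "Failed to release KNI device\n");
    kni = NULL; /* the object is freed, the pointer must not be reused */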
I will send v2 with the approved changes in a couple of days.
In the meantime, I'll think about what we can do with the context memzone.
Best regards,
Igor
* Re: [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI
2018-08-29 9:52 ` Igor Ryzhov
@ 2018-08-30 10:55 ` Ferruh Yigit
0 siblings, 0 replies; 13+ messages in thread
From: Ferruh Yigit @ 2018-08-30 10:55 UTC (permalink / raw)
To: Igor Ryzhov; +Cc: dev
On 8/29/2018 10:52 AM, Igor Ryzhov wrote:
> Hello Ferruh,
>
> Thanks for the review, comments inline.
>
> On Mon, Aug 27, 2018 at 8:06 PM, Ferruh Yigit <ferruh.yigit@intel.com
> <mailto:ferruh.yigit@intel.com>> wrote:
>
> On 8/2/2018 3:25 PM, Igor Ryzhov wrote:
> > Long time ago preallocation of memory for KNI was introduced in commit
> > 0c6bc8e. It was done because of lack of ability to free previously
> > allocated memzones, which led to memzone exhaustion. Currently memzones
> > can be freed and this patch uses this ability for dynamic KNI memory
> > allocation.
>
> Hi Igor,
>
> It is good to be able to allocate memory dynamically and get rid of the
> "max_kni_ifaces" and "kni_memzone_pool", thanks for the patch.
>
> Overall looks good, a few comments below.
>
> >
> > Signed-off-by: Igor Ryzhov <iryzhov@nfware.com <mailto:iryzhov@nfware.com>>
> > ---
> > lib/librte_kni/rte_kni.c | 392 ++++++++++++---------------------------
> > lib/librte_kni/rte_kni.h | 6 +-
> > test/test/test_kni.c | 6 -
> > 3 files changed, 128 insertions(+), 276 deletions(-)
> >
> > diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
> > index 8a8f6c1cc..028b44bfd 100644
> > --- a/lib/librte_kni/rte_kni.c
> > +++ b/lib/librte_kni/rte_kni.c
> > @@ -36,24 +36,33 @@
> > * KNI context
> > */
> > struct rte_kni {
> > + const struct rte_memzone *mz; /**< KNI context memzone */
>
> I was thinking remove the context memzone and use rte_zmalloc() to create kni
> objects but updated rte_kni_get() API seems relaying this.
> If you see any other way to get kni object from name in rte_kni_get(), I am for
> removing above *mz variable from rte_kni struct.
>
>
> I had absolutely the same thought but didn't find a way to save rte_kni_get() API.
> Maybe someone has any ideas?
> Or maybe this API can be marked deprecated and deleted in future?
I suggest keeping the API; there may be some users, we don't know. And it doesn't sound
right to remove an API because of internal implementation details, so it looks like
we will need to keep the memzone.
>
>
>
> <...>
>
> > +static void
> > +kni_ctx_release_mz(struct rte_kni *ctx)
> > +{
> > + rte_memzone_free(ctx->m_tx_q);
> > + rte_memzone_free(ctx->m_rx_q);
> > + rte_memzone_free(ctx->m_alloc_q);
> > + rte_memzone_free(ctx->m_free_q);
> > + rte_memzone_free(ctx->m_req_q);
> > + rte_memzone_free(ctx->m_resp_q);
> > + rte_memzone_free(ctx->m_sync_addr);
>
>
> "ctx" sounds confusing to me, isn't this "rte_kni" object instance, why not just
> call it "kni" or if it is too generic "kni_obj" or similar? For other APIs
> as well.
>
>
> "ctx" was already used in the code, so I didn't change it.
> I also think that it's better to use "kni" – will change it in v2.
>
>
>
> And this is just a detail but about order of APIs would you mind having first
> reserve() one, later release() one?
>
>
> Ok.
>
>
>
> <...>
>
> > -/* Shall be called before any allocation happens */
> > -void
> > -rte_kni_init(unsigned int max_kni_ifaces)
> > +static struct rte_kni *
> > +kni_ctx_reserve(const char *name)
> > {
> > - uint32_t i;
> > - struct rte_kni_memzone_slot *it;
> > + struct rte_kni *ctx;
> > const struct rte_memzone *mz;
> > -#define OBJNAMSIZ 32
> > - char obj_name[OBJNAMSIZ];
> > char mz_name[RTE_MEMZONE_NAMESIZE];
> >
> > - /* Immediately return if KNI is already initialized */
> > - if (kni_memzone_pool.initialized) {
> > - RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
> > - return;
> > - }
> > + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "kni_info_%s", name);
>
> Can you please convert memzone names, like "kni_info" to defines, for all of
> them?
>
>
> Ok.
>
>
>
> <...>
>
> > @@ -81,8 +81,12 @@ struct rte_kni_conf {
> > *
> > * @param max_kni_ifaces
> > * The maximum number of KNI interfaces that can coexist concurrently
> > + *
> > + * @return
> > + * - 0 indicates success.
> > + * - negative value indicates failure.
> > */
> > -void rte_kni_init(unsigned int max_kni_ifaces);
> > +int rte_kni_init(unsigned int max_kni_ifaces);
>
> This changes the API. Return type changes from "void" to "int". I agree "int"
> makes more sense since API can fail, but this changes the ABI/API.
>
> Since existing binaries doesn't check the return type at all there may be no
> issue from ABI point of view but from API point of view some apps may get return
> value not checked warnings, not sure though.
>
> And the need of the API is questionable at this stage, it may be possible to
> move rte_kni_alloc() where it already has "kni_fd" check.
>
> What do you think keep API signature same for now, but add a deprecation notice
> to remove the API. Next release (v19.02) remove rte_kni_init() completely?
>
>
> As I know, warnings can only be returned if the warn_unused_result attribute is
> used, which is not the case here.
> So I think that changing from void to int should not break anything. Can change
> it back in v2 if I'm wrong.
>
> Regarding the API removal – I think it's better to save that function, to have a
> more clear API.
OK, fair enough. Let's keep it with the same name.
> As we have rte_kni_close to close KNI device, we should have a function to open it.
> Maybe it should be renamed to rte_kni_open :)
>
>
>
> <...>
>
> > /**
> > diff --git a/test/test/test_kni.c b/test/test/test_kni.c
> > index 1b876719a..56c98513a 100644
> > --- a/test/test/test_kni.c
> > +++ b/test/test/test_kni.c
> > @@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
> > }
> > test_kni_ctx = NULL;
> >
> > - /* test of releasing a released kni device */
> > - if (rte_kni_release(kni) == 0) {
> > - printf("should not release a released kni device\n");
> > - return -1;
> > - }
>
> Why need to remove this?
>
>
> Previously, rte_kni_release didn't free any memory, and the second call with the
> same argument just checked "in_use" flag and returned.
> After my changes, memory is actually freed, and rte_kni_release must not be
> called twice with the same argument.
OK.
> Will send v2 with approved changes in a couple of days.
> At the same time, I'll think what can we do with context memzone.
Thanks.
* [dpdk-dev] [PATCH v2] kni: dynamically allocate memory for each KNI
2018-08-02 14:25 [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI Igor Ryzhov
2018-08-27 17:06 ` Ferruh Yigit
@ 2018-09-23 19:12 ` Igor Ryzhov
2018-09-26 10:41 ` Ferruh Yigit
2018-09-26 16:21 ` [dpdk-dev] [PATCH v3] " Igor Ryzhov
1 sibling, 2 replies; 13+ messages in thread
From: Igor Ryzhov @ 2018-09-23 19:12 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
A long time ago, preallocation of memory for KNI was introduced in commit
0c6bc8e. It was done because of the lack of an ability to free previously
allocated memzones, which led to memzone exhaustion. Memzones can now be
freed, and this patch uses that ability for dynamic KNI memory allocation.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
---
v2:
* allocate KNI using rte_zmalloc
* swap reserve/release functions
* use "kni" as a variable name
* use macros for memzone names
lib/librte_kni/rte_kni.c | 502 +++++++++++++++++----------------------
lib/librte_kni/rte_kni.h | 6 +-
test/test/test_kni.c | 6 -
3 files changed, 218 insertions(+), 296 deletions(-)
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 8a8f6c1cc..6af6e7efe 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -18,6 +18,9 @@
#include <rte_log.h>
#include <rte_kni.h>
#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_rwlock.h>
+#include <rte_eal_memconfig.h>
#include <exec-env/rte_kni_common.h>
#include "rte_kni_fifo.h"
@@ -30,7 +33,23 @@
#define KNI_REQUEST_MBUF_NUM_MAX 32
-#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)
+#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)
+
+#define KNI_MZ_NAME_FMT "kni_info_%s"
+#define KNI_TX_Q_MZ_NAME_FMT "kni_tx_%s"
+#define KNI_RX_Q_MZ_NAME_FMT "kni_rx_%s"
+#define KNI_ALLOC_Q_MZ_NAME_FMT "kni_alloc_%s"
+#define KNI_FREE_Q_MZ_NAME_FMT "kni_free_%s"
+#define KNI_REQ_Q_MZ_NAME_FMT "kni_req_%s"
+#define KNI_RESP_Q_MZ_NAME_FMT "kni_resp_%s"
+#define KNI_SYNC_ADDR_MZ_NAME_FMT "kni_sync_%s"
+
+TAILQ_HEAD(rte_kni_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_kni_tailq = {
+ .name = "RTE_KNI",
+};
+EAL_REGISTER_TAILQ(rte_kni_tailq)
/**
* KNI context
@@ -42,18 +61,26 @@ struct rte_kni {
struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
unsigned mbuf_size; /**< mbuf size */
+ const struct rte_memzone *m_tx_q; /**< TX queue memzone */
+ const struct rte_memzone *m_rx_q; /**< RX queue memzone */
+ const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
+ const struct rte_memzone *m_free_q; /**< Free queue memzone */
+
struct rte_kni_fifo *tx_q; /**< TX queue */
struct rte_kni_fifo *rx_q; /**< RX queue */
struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */
struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */
+ const struct rte_memzone *m_req_q; /**< Request queue memzone */
+ const struct rte_memzone *m_resp_q; /**< Response queue memzone */
+ const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */
+
/* For request & response */
struct rte_kni_fifo *req_q; /**< Request queue */
struct rte_kni_fifo *resp_q; /**< Response queue */
void * sync_addr; /**< Req/Resp Mem address */
struct rte_kni_ops ops; /**< operations for request */
- uint8_t in_use : 1; /**< kni in use */
};
enum kni_ops_status {
@@ -61,232 +88,91 @@ enum kni_ops_status {
KNI_REQ_REGISTERED,
};
-/**
- * KNI memzone pool slot
- */
-struct rte_kni_memzone_slot {
- uint32_t id;
- uint8_t in_use : 1; /**< slot in use */
-
- /* Memzones */
- const struct rte_memzone *m_ctx; /**< KNI ctx */
- const struct rte_memzone *m_tx_q; /**< TX queue */
- const struct rte_memzone *m_rx_q; /**< RX queue */
- const struct rte_memzone *m_alloc_q; /**< Allocated mbufs queue */
- const struct rte_memzone *m_free_q; /**< To be freed mbufs queue */
- const struct rte_memzone *m_req_q; /**< Request queue */
- const struct rte_memzone *m_resp_q; /**< Response queue */
- const struct rte_memzone *m_sync_addr;
-
- /* Free linked list */
- struct rte_kni_memzone_slot *next; /**< Next slot link.list */
-};
-
-/**
- * KNI memzone pool
- */
-struct rte_kni_memzone_pool {
- uint8_t initialized : 1; /**< Global KNI pool init flag */
-
- uint32_t max_ifaces; /**< Max. num of KNI ifaces */
- struct rte_kni_memzone_slot *slots; /**< Pool slots */
- rte_spinlock_t mutex; /**< alloc/release mutex */
-
- /* Free memzone slots linked-list */
- struct rte_kni_memzone_slot *free; /**< First empty slot */
- struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
-};
-
-
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);
static volatile int kni_fd = -1;
-static struct rte_kni_memzone_pool kni_memzone_pool = {
- .initialized = 0,
-};
-static const struct rte_memzone *
-kni_memzone_reserve(const char *name, size_t len, int socket_id,
- unsigned flags)
+/* Shall be called before any allocation happens */
+int
+rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
- const struct rte_memzone *mz = rte_memzone_lookup(name);
-
- if (mz == NULL)
- mz = rte_memzone_reserve(name, len, socket_id, flags);
+ /* Check FD and open */
+ if (kni_fd < 0) {
+ kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
+ if (kni_fd < 0) {
+ RTE_LOG(ERR, KNI,
+ "Can not open /dev/%s\n", KNI_DEVICE);
+ return -1;
+ }
+ }
- return mz;
+ return 0;
}
-/* Pool mgmt */
-static struct rte_kni_memzone_slot*
-kni_memzone_pool_alloc(void)
+static int
+kni_reserve_mz(struct rte_kni *kni)
{
- struct rte_kni_memzone_slot *slot;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
- rte_spinlock_lock(&kni_memzone_pool.mutex);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
+ kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);
- if (!kni_memzone_pool.free) {
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
- return NULL;
- }
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
+ kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);
- slot = kni_memzone_pool.free;
- kni_memzone_pool.free = slot->next;
- slot->in_use = 1;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
+ kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);
- if (!kni_memzone_pool.free)
- kni_memzone_pool.free_tail = NULL;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
+ kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
+ kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);
- return slot;
-}
-
-static void
-kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
-{
- rte_spinlock_lock(&kni_memzone_pool.mutex);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
+ kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);
- if (kni_memzone_pool.free)
- kni_memzone_pool.free_tail->next = slot;
- else
- kni_memzone_pool.free = slot;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
+ kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);
- kni_memzone_pool.free_tail = slot;
- slot->next = NULL;
- slot->in_use = 0;
+ return 0;
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
+sync_addr_fail:
+ rte_memzone_free(kni->m_resp_q);
+resp_q_fail:
+ rte_memzone_free(kni->m_req_q);
+req_q_fail:
+ rte_memzone_free(kni->m_free_q);
+free_q_fail:
+ rte_memzone_free(kni->m_alloc_q);
+alloc_q_fail:
+ rte_memzone_free(kni->m_rx_q);
+rx_q_fail:
+ rte_memzone_free(kni->m_tx_q);
+tx_q_fail:
+ return -1;
}
-
-/* Shall be called before any allocation happens */
-void
-rte_kni_init(unsigned int max_kni_ifaces)
+static void
+kni_release_mz(struct rte_kni *kni)
{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
- const struct rte_memzone *mz;
-#define OBJNAMSIZ 32
- char obj_name[OBJNAMSIZ];
- char mz_name[RTE_MEMZONE_NAMESIZE];
-
- /* Immediately return if KNI is already initialized */
- if (kni_memzone_pool.initialized) {
- RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
- return;
- }
-
- if (max_kni_ifaces == 0) {
- RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
- max_kni_ifaces);
- RTE_LOG(ERR, KNI, "Unable to initialize KNI\n");
- return;
- }
-
- /* Check FD and open */
- if (kni_fd < 0) {
- kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
- if (kni_fd < 0) {
- RTE_LOG(ERR, KNI,
- "Can not open /dev/%s\n", KNI_DEVICE);
- return;
- }
- }
-
- /* Allocate slot objects */
- kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
- rte_malloc(NULL,
- sizeof(struct rte_kni_memzone_slot) *
- max_kni_ifaces,
- 0);
- KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
-
- /* Initialize general pool variables */
- kni_memzone_pool.initialized = 1;
- kni_memzone_pool.max_ifaces = max_kni_ifaces;
- kni_memzone_pool.free = &kni_memzone_pool.slots[0];
- rte_spinlock_init(&kni_memzone_pool.mutex);
-
- /* Pre-allocate all memzones of all the slots; panic on error */
- for (i = 0; i < max_kni_ifaces; i++) {
-
- /* Recover current slot */
- it = &kni_memzone_pool.slots[i];
- it->id = i;
-
- /* Allocate KNI context */
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
- mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_ctx = mz;
-
- /* TX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_tx_q = mz;
-
- /* RX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_rx_q = mz;
-
- /* ALLOC RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_alloc_q = mz;
-
- /* FREE RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_free_q = mz;
-
- /* Request RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_req_q = mz;
-
- /* Response RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_resp_q = mz;
-
- /* Req/Resp sync mem area */
- snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_sync_addr = mz;
-
- if ((i+1) == max_kni_ifaces) {
- it->next = NULL;
- kni_memzone_pool.free_tail = it;
- } else
- it->next = &kni_memzone_pool.slots[i+1];
- }
-
- return;
-
-kni_fail:
- RTE_LOG(ERR, KNI, "Unable to allocate memory for max_kni_ifaces:%d."
- "Increase the amount of hugepages memory\n", max_kni_ifaces);
+ rte_memzone_free(kni->m_tx_q);
+ rte_memzone_free(kni->m_rx_q);
+ rte_memzone_free(kni->m_alloc_q);
+ rte_memzone_free(kni->m_free_q);
+ rte_memzone_free(kni->m_req_q);
+ rte_memzone_free(kni->m_resp_q);
+ rte_memzone_free(kni->m_sync_addr);
}
-
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
const struct rte_kni_conf *conf,
@@ -294,41 +180,52 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
{
int ret;
struct rte_kni_device_info dev_info;
- struct rte_kni *ctx;
- char intf_name[RTE_KNI_NAMESIZE];
- const struct rte_memzone *mz;
- struct rte_kni_memzone_slot *slot = NULL;
+ struct rte_kni *kni;
+ struct rte_tailq_entry *te = NULL;
+ struct rte_kni_list *kni_list;
+
+ kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
if (!pktmbuf_pool || !conf || !conf->name[0])
return NULL;
/* Check if KNI subsystem has been initialized */
- if (kni_memzone_pool.initialized != 1) {
+ if (kni_fd < 0) {
RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
return NULL;
}
- /* Get an available slot from the pool */
- slot = kni_memzone_pool_alloc();
- if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
- kni_memzone_pool.max_ifaces);
- return NULL;
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, kni_list, next) {
+ kni = (struct rte_kni *) te->data;
+ if (strncmp(conf->name, kni->name, RTE_KNI_NAMESIZE) == 0)
+ break;
}
- /* Recover ctx */
- ctx = slot->m_ctx->addr;
- snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
+ if (te != NULL) {
+ RTE_LOG(ERR, KNI, "KNI already exists\n");
+ goto unlock;
+ }
- if (ctx->in_use) {
- RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
- return NULL;
+ te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n");
+ goto unlock;
+ }
+
+ kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);
+ if (kni == NULL) {
+ RTE_LOG(ERR, KNI, "KNI memory allocation failed\n");
+ goto kni_fail;
}
- memset(ctx, 0, sizeof(struct rte_kni));
+
+ snprintf(kni->name, RTE_KNI_NAMESIZE, "%s", conf->name);
+
if (ops)
- memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
+ memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
else
- ctx->ops.port_id = UINT16_MAX;
+ kni->ops.port_id = UINT16_MAX;
memset(&dev_info, 0, sizeof(dev_info));
dev_info.bus = conf->addr.bus;
@@ -344,72 +241,76 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
- snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
- snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);
+ snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", conf->name);
RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
dev_info.bus, dev_info.devid, dev_info.function,
dev_info.vendor_id, dev_info.device_id);
+
+ ret = kni_reserve_mz(kni);
+ if (ret < 0)
+ goto mz_fail;
+
/* TX RING */
- mz = slot->m_tx_q;
- ctx->tx_q = mz->addr;
- kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
- dev_info.tx_phys = mz->phys_addr;
+ kni->tx_q = kni->m_tx_q->addr;
+ kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
+ dev_info.tx_phys = kni->m_tx_q->phys_addr;
/* RX RING */
- mz = slot->m_rx_q;
- ctx->rx_q = mz->addr;
- kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
- dev_info.rx_phys = mz->phys_addr;
+ kni->rx_q = kni->m_rx_q->addr;
+ kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
+ dev_info.rx_phys = kni->m_rx_q->phys_addr;
/* ALLOC RING */
- mz = slot->m_alloc_q;
- ctx->alloc_q = mz->addr;
- kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
- dev_info.alloc_phys = mz->phys_addr;
+ kni->alloc_q = kni->m_alloc_q->addr;
+ kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
+ dev_info.alloc_phys = kni->m_alloc_q->phys_addr;
/* FREE RING */
- mz = slot->m_free_q;
- ctx->free_q = mz->addr;
- kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
- dev_info.free_phys = mz->phys_addr;
+ kni->free_q = kni->m_free_q->addr;
+ kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
+ dev_info.free_phys = kni->m_free_q->phys_addr;
/* Request RING */
- mz = slot->m_req_q;
- ctx->req_q = mz->addr;
- kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
- dev_info.req_phys = mz->phys_addr;
+ kni->req_q = kni->m_req_q->addr;
+ kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
+ dev_info.req_phys = kni->m_req_q->phys_addr;
/* Response RING */
- mz = slot->m_resp_q;
- ctx->resp_q = mz->addr;
- kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
- dev_info.resp_phys = mz->phys_addr;
+ kni->resp_q = kni->m_resp_q->addr;
+ kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
+ dev_info.resp_phys = kni->m_resp_q->phys_addr;
/* Req/Resp sync mem area */
- mz = slot->m_sync_addr;
- ctx->sync_addr = mz->addr;
- dev_info.sync_va = mz->addr;
- dev_info.sync_phys = mz->phys_addr;
+ kni->sync_addr = kni->m_sync_addr->addr;
+ dev_info.sync_va = kni->m_sync_addr->addr;
+ dev_info.sync_phys = kni->m_sync_addr->phys_addr;
- ctx->pktmbuf_pool = pktmbuf_pool;
- ctx->group_id = conf->group_id;
- ctx->slot_id = slot->id;
- ctx->mbuf_size = conf->mbuf_size;
+ kni->pktmbuf_pool = pktmbuf_pool;
+ kni->group_id = conf->group_id;
+ kni->mbuf_size = conf->mbuf_size;
ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
- KNI_MEM_CHECK(ret < 0);
+ if (ret < 0)
+ goto ioctl_fail;
- ctx->in_use = 1;
+ te->data = kni;
+ TAILQ_INSERT_TAIL(kni_list, te, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
/* Allocate mbufs and then put them into alloc_q */
- kni_allocate_mbufs(ctx);
+ kni_allocate_mbufs(kni);
- return ctx;
+ return kni;
+ioctl_fail:
+ kni_release_mz(kni);
+mz_fail:
+ rte_free(kni);
kni_fail:
- if (slot)
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);
+ rte_free(te);
+unlock:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
return NULL;
}
@@ -462,19 +363,37 @@ kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
int
rte_kni_release(struct rte_kni *kni)
{
+ struct rte_tailq_entry *te;
+ struct rte_kni_list *kni_list;
struct rte_kni_device_info dev_info;
- uint32_t slot_id;
uint32_t retry = 5;
- if (!kni || !kni->in_use)
+ if (!kni)
return -1;
+ kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, kni_list, next) {
+ if (te->data == kni)
+ break;
+ }
+
+ if (te == NULL) {
+ goto unlock;
+ }
+
snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
RTE_LOG(ERR, KNI, "Fail to release kni device\n");
- return -1;
+ goto unlock;
}
+ TAILQ_REMOVE(kni_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
/* mbufs in all fifo should be released, except request/response */
/* wait until all rxq packets processed by kernel */
@@ -488,20 +407,18 @@ rte_kni_release(struct rte_kni *kni)
kni_free_fifo(kni->tx_q);
kni_free_fifo(kni->free_q);
- slot_id = kni->slot_id;
+ kni_release_mz(kni);
- /* Memset the KNI struct */
- memset(kni, 0, sizeof(struct rte_kni));
+ rte_free(kni);
- /* Release memzone */
- if (slot_id > kni_memzone_pool.max_ifaces) {
- RTE_LOG(ERR, KNI, "KNI pool: corrupted slot ID: %d, max: %d\n",
- slot_id, kni_memzone_pool.max_ifaces);
- return -1;
- }
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
+ rte_free(te);
return 0;
+
+unlock:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return -1;
}
/* default callback for request of configuring device mac address */
@@ -711,21 +628,28 @@ kni_allocate_mbufs(struct rte_kni *kni)
struct rte_kni *
rte_kni_get(const char *name)
{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
- struct rte_kni *kni;
+ struct rte_kni *kni = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_kni_list *kni_list;
+
+ if (!name || !name[0])
+ return NULL;
- /* Note: could be improved perf-wise if necessary */
- for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
- it = &kni_memzone_pool.slots[i];
- if (it->in_use == 0)
- continue;
- kni = it->m_ctx->addr;
- if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
- return kni;
+ kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, kni_list, next) {
+ kni = te->data;
+ if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
+ break;
}
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
- return NULL;
+ if (te == NULL) {
+ return NULL;
+ }
+
+ return kni;
}
const char *
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 99055e2c2..601abdfc6 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -81,8 +81,12 @@ struct rte_kni_conf {
*
* @param max_kni_ifaces
* The maximum number of KNI interfaces that can coexist concurrently
+ *
+ * @return
+ * - 0 indicates success.
+ * - negative value indicates failure.
*/
-void rte_kni_init(unsigned int max_kni_ifaces);
+int rte_kni_init(unsigned int max_kni_ifaces);
/**
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index 1b876719a..56c98513a 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
}
test_kni_ctx = NULL;
- /* test of releasing a released kni device */
- if (rte_kni_release(kni) == 0) {
- printf("should not release a released kni device\n");
- return -1;
- }
-
/* test of reusing memzone */
kni = rte_kni_alloc(mp, &conf, &ops);
if (!kni) {
--
2.18.0
* Re: [dpdk-dev] [PATCH v2] kni: dynamically allocate memory for each KNI
2018-09-23 19:12 ` [dpdk-dev] [PATCH v2] " Igor Ryzhov
@ 2018-09-26 10:41 ` Ferruh Yigit
2018-09-26 13:31 ` Igor Ryzhov
2018-09-26 16:21 ` [dpdk-dev] [PATCH v3] " Igor Ryzhov
1 sibling, 1 reply; 13+ messages in thread
From: Ferruh Yigit @ 2018-09-26 10:41 UTC (permalink / raw)
To: Igor Ryzhov, dev
On 9/23/2018 8:12 PM, Igor Ryzhov wrote:
> Long time ago preallocation of memory for KNI was introduced in commit
> 0c6bc8e. It was done because of lack of ability to free previously
> allocated memzones, which led to memzone exhaustion. Currently memzones
> can be freed and this patch uses this ability for dynamic KNI memory
> allocation.
Hi Igor,
Good cleanup, thanks.
+1 to eal_tailq for ctx
A few minor comments below, but they are not significant enough to block the
patch; please let us know if you don't have the bandwidth for a new version.
> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
<...>
> @@ -294,41 +180,52 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
> {
> int ret;
> struct rte_kni_device_info dev_info;
> - struct rte_kni *ctx;
> - char intf_name[RTE_KNI_NAMESIZE];
> - const struct rte_memzone *mz;
> - struct rte_kni_memzone_slot *slot = NULL;
> + struct rte_kni *kni;
> + struct rte_tailq_entry *te = NULL;
> + struct rte_kni_list *kni_list;
> +
> + kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
Can you move this below the input validation? There is no need for this
assignment if the API will fail because of wrong input.
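E.g., a rough sketch of the suggested reordering (illustrative only, untested;
all calls taken from the hunk below):

        if (!pktmbuf_pool || !conf || !conf->name[0])
                return NULL;

        /* Check if KNI subsystem has been initialized */
        if (kni_fd < 0) {
                RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
                return NULL;
        }

        kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);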
> if (!pktmbuf_pool || !conf || !conf->name[0])
> return NULL;
>
> /* Check if KNI subsystem has been initialized */
> - if (kni_memzone_pool.initialized != 1) {
> + if (kni_fd < 0) {
> RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
> return NULL;
> }
>
> - /* Get an available slot from the pool */
> - slot = kni_memzone_pool_alloc();
> - if (!slot) {
> - RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
> - kni_memzone_pool.max_ifaces);
> - return NULL;
> + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
> +
> + TAILQ_FOREACH(te, kni_list, next) {
> + kni = (struct rte_kni *) te->data;
> + if (strncmp(conf->name, kni->name, RTE_KNI_NAMESIZE) == 0)
> + break;
> }
This is rte_kni_get(), why not reuse it? You can create a version of it without
the lock, like _rte_kni_get(), which you can call here.
And
rte_kni_get()
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
_rte_kni_get()
rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
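Something along these lines (a rough sketch of the suggested split, illustrative
only and untested; the helper name and exact signature are just the outline
above, not final code):

        /* caller must hold RTE_EAL_TAILQ_RWLOCK */
        static struct rte_kni *
        _rte_kni_get(const char *name)
        {
                struct rte_kni *kni;
                struct rte_tailq_entry *te;
                struct rte_kni_list *kni_list;

                kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

                TAILQ_FOREACH(te, kni_list, next) {
                        kni = te->data;
                        if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
                                return kni;
                }

                return NULL;
        }

        struct rte_kni *
        rte_kni_get(const char *name)
        {
                struct rte_kni *kni;

                if (!name || !name[0])
                        return NULL;

                rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
                kni = _rte_kni_get(name);
                rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

                return kni;
        }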
<...>
> +
> + if (te == NULL) {
> + goto unlock;
> + }
No need {} for single line.
One more below.
* Re: [dpdk-dev] [PATCH v2] kni: dynamically allocate memory for each KNI
2018-09-26 10:41 ` Ferruh Yigit
@ 2018-09-26 13:31 ` Igor Ryzhov
0 siblings, 0 replies; 13+ messages in thread
From: Igor Ryzhov @ 2018-09-26 13:31 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: dev
Hi Ferruh,
Will fix it today.
Igor
On Wed, Sep 26, 2018 at 1:41 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> On 9/23/2018 8:12 PM, Igor Ryzhov wrote:
> > Long time ago preallocation of memory for KNI was introduced in commit
> > 0c6bc8e. It was done because of lack of ability to free previously
> > allocated memzones, which led to memzone exhaustion. Currently memzones
> > can be freed and this patch uses this ability for dynamic KNI memory
> > allocation.
>
> Hi Igor,
>
> Good cleanup, thanks.
> +1 to eal_tailq for ctx
>
> A few minor comments below, but they are not significant enough to block the
> patch. Please let us know if you don't have the bandwidth for a new version.
>
> > Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
> <...>
>
> > @@ -294,41 +180,52 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
> > {
> > int ret;
> > struct rte_kni_device_info dev_info;
> > - struct rte_kni *ctx;
> > - char intf_name[RTE_KNI_NAMESIZE];
> > - const struct rte_memzone *mz;
> > - struct rte_kni_memzone_slot *slot = NULL;
> > + struct rte_kni *kni;
> > + struct rte_tailq_entry *te = NULL;
> > + struct rte_kni_list *kni_list;
> > +
> > + kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
>
> Can you move this below the input validation? There is no need for this
> assignment if the API will fail because of wrong input.
>
> > if (!pktmbuf_pool || !conf || !conf->name[0])
> > return NULL;
> >
> > /* Check if KNI subsystem has been initialized */
> > - if (kni_memzone_pool.initialized != 1) {
> > + if (kni_fd < 0) {
> > RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized.
> Invoke rte_kni_init() first\n");
> > return NULL;
> > }
> >
> > - /* Get an available slot from the pool */
> > - slot = kni_memzone_pool_alloc();
> > - if (!slot) {
> > - RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces;
> increase the number of max_kni_ifaces(current %d) or release unused
> ones.\n",
> > - kni_memzone_pool.max_ifaces);
> > - return NULL;
> > + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
> > +
> > + TAILQ_FOREACH(te, kni_list, next) {
> > + kni = (struct rte_kni *) te->data;
> > + if (strncmp(conf->name, kni->name, RTE_KNI_NAMESIZE) == 0)
> > + break;
> > }
>
> This is rte_kni_get(), why not reuse it? You can create a version of it
> without the lock, like _rte_kni_get(), which you can call here.
>
> And
> rte_kni_get()
> rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
> _rte_kni_get()
> rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
>
> <...>
>
> > +
> > + if (te == NULL) {
> > + goto unlock;
> > + }
>
> No need {} for single line.
> One more below.
>
* [dpdk-dev] [PATCH v3] kni: dynamically allocate memory for each KNI
2018-09-23 19:12 ` [dpdk-dev] [PATCH v2] " Igor Ryzhov
2018-09-26 10:41 ` Ferruh Yigit
@ 2018-09-26 16:21 ` Igor Ryzhov
2018-09-27 19:20 ` Ferruh Yigit
` (2 more replies)
1 sibling, 3 replies; 13+ messages in thread
From: Igor Ryzhov @ 2018-09-26 16:21 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
Long time ago preallocation of memory for KNI was introduced in commit
0c6bc8e. It was done because of lack of ability to free previously
allocated memzones, which led to memzone exhaustion. Currently memzones
can be freed and this patch uses this ability for dynamic KNI memory
allocation.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
---
v3:
* style fixes
v2:
* allocate KNI using rte_zmalloc
* swap reserve/release functions
* use "kni" as a variable name
* use macros for memzone names
lib/librte_kni/rte_kni.c | 496 +++++++++++++++++----------------------
lib/librte_kni/rte_kni.h | 6 +-
test/test/test_kni.c | 6 -
3 files changed, 218 insertions(+), 290 deletions(-)
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 8a8f6c1cc..8b68008bb 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -18,6 +18,9 @@
#include <rte_log.h>
#include <rte_kni.h>
#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_rwlock.h>
+#include <rte_eal_memconfig.h>
#include <exec-env/rte_kni_common.h>
#include "rte_kni_fifo.h"
@@ -30,7 +33,23 @@
#define KNI_REQUEST_MBUF_NUM_MAX 32
-#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)
+#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)
+
+#define KNI_MZ_NAME_FMT "kni_info_%s"
+#define KNI_TX_Q_MZ_NAME_FMT "kni_tx_%s"
+#define KNI_RX_Q_MZ_NAME_FMT "kni_rx_%s"
+#define KNI_ALLOC_Q_MZ_NAME_FMT "kni_alloc_%s"
+#define KNI_FREE_Q_MZ_NAME_FMT "kni_free_%s"
+#define KNI_REQ_Q_MZ_NAME_FMT "kni_req_%s"
+#define KNI_RESP_Q_MZ_NAME_FMT "kni_resp_%s"
+#define KNI_SYNC_ADDR_MZ_NAME_FMT "kni_sync_%s"
+
+TAILQ_HEAD(rte_kni_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_kni_tailq = {
+ .name = "RTE_KNI",
+};
+EAL_REGISTER_TAILQ(rte_kni_tailq)
/**
* KNI context
@@ -42,18 +61,26 @@ struct rte_kni {
struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
unsigned mbuf_size; /**< mbuf size */
+ const struct rte_memzone *m_tx_q; /**< TX queue memzone */
+ const struct rte_memzone *m_rx_q; /**< RX queue memzone */
+ const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
+ const struct rte_memzone *m_free_q; /**< Free queue memzone */
+
struct rte_kni_fifo *tx_q; /**< TX queue */
struct rte_kni_fifo *rx_q; /**< RX queue */
struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */
struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */
+ const struct rte_memzone *m_req_q; /**< Request queue memzone */
+ const struct rte_memzone *m_resp_q; /**< Response queue memzone */
+ const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */
+
/* For request & response */
struct rte_kni_fifo *req_q; /**< Request queue */
struct rte_kni_fifo *resp_q; /**< Response queue */
void * sync_addr; /**< Req/Resp Mem address */
struct rte_kni_ops ops; /**< operations for request */
- uint8_t in_use : 1; /**< kni in use */
};
enum kni_ops_status {
@@ -61,231 +88,111 @@ enum kni_ops_status {
KNI_REQ_REGISTERED,
};
-/**
- * KNI memzone pool slot
- */
-struct rte_kni_memzone_slot {
- uint32_t id;
- uint8_t in_use : 1; /**< slot in use */
-
- /* Memzones */
- const struct rte_memzone *m_ctx; /**< KNI ctx */
- const struct rte_memzone *m_tx_q; /**< TX queue */
- const struct rte_memzone *m_rx_q; /**< RX queue */
- const struct rte_memzone *m_alloc_q; /**< Allocated mbufs queue */
- const struct rte_memzone *m_free_q; /**< To be freed mbufs queue */
- const struct rte_memzone *m_req_q; /**< Request queue */
- const struct rte_memzone *m_resp_q; /**< Response queue */
- const struct rte_memzone *m_sync_addr;
-
- /* Free linked list */
- struct rte_kni_memzone_slot *next; /**< Next slot link.list */
-};
-
-/**
- * KNI memzone pool
- */
-struct rte_kni_memzone_pool {
- uint8_t initialized : 1; /**< Global KNI pool init flag */
-
- uint32_t max_ifaces; /**< Max. num of KNI ifaces */
- struct rte_kni_memzone_slot *slots; /**< Pool slots */
- rte_spinlock_t mutex; /**< alloc/release mutex */
-
- /* Free memzone slots linked-list */
- struct rte_kni_memzone_slot *free; /**< First empty slot */
- struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
-};
-
-
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);
static volatile int kni_fd = -1;
-static struct rte_kni_memzone_pool kni_memzone_pool = {
- .initialized = 0,
-};
-static const struct rte_memzone *
-kni_memzone_reserve(const char *name, size_t len, int socket_id,
- unsigned flags)
+/* Shall be called before any allocation happens */
+int
+rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
- const struct rte_memzone *mz = rte_memzone_lookup(name);
-
- if (mz == NULL)
- mz = rte_memzone_reserve(name, len, socket_id, flags);
+ /* Check FD and open */
+ if (kni_fd < 0) {
+ kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
+ if (kni_fd < 0) {
+ RTE_LOG(ERR, KNI,
+ "Can not open /dev/%s\n", KNI_DEVICE);
+ return -1;
+ }
+ }
- return mz;
+ return 0;
}
-/* Pool mgmt */
-static struct rte_kni_memzone_slot*
-kni_memzone_pool_alloc(void)
+static struct rte_kni *
+__rte_kni_get(const char *name)
{
- struct rte_kni_memzone_slot *slot;
+ struct rte_kni *kni;
+ struct rte_tailq_entry *te;
+ struct rte_kni_list *kni_list;
- rte_spinlock_lock(&kni_memzone_pool.mutex);
+ kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
- if (!kni_memzone_pool.free) {
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
- return NULL;
+ TAILQ_FOREACH(te, kni_list, next) {
+ kni = te->data;
+ if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
+ break;
}
- slot = kni_memzone_pool.free;
- kni_memzone_pool.free = slot->next;
- slot->in_use = 1;
-
- if (!kni_memzone_pool.free)
- kni_memzone_pool.free_tail = NULL;
+ if (te == NULL)
+ kni = NULL;
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
-
- return slot;
+ return kni;
}
-static void
-kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
+static int
+kni_reserve_mz(struct rte_kni *kni)
{
- rte_spinlock_lock(&kni_memzone_pool.mutex);
+ char mz_name[RTE_MEMZONE_NAMESIZE];
- if (kni_memzone_pool.free)
- kni_memzone_pool.free_tail->next = slot;
- else
- kni_memzone_pool.free = slot;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
+ kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);
- kni_memzone_pool.free_tail = slot;
- slot->next = NULL;
- slot->in_use = 0;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
+ kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);
- rte_spinlock_unlock(&kni_memzone_pool.mutex);
-}
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
+ kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
+ kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);
-/* Shall be called before any allocation happens */
-void
-rte_kni_init(unsigned int max_kni_ifaces)
-{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
- const struct rte_memzone *mz;
-#define OBJNAMSIZ 32
- char obj_name[OBJNAMSIZ];
- char mz_name[RTE_MEMZONE_NAMESIZE];
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
+ kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);
- /* Immediately return if KNI is already initialized */
- if (kni_memzone_pool.initialized) {
- RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()");
- return;
- }
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
+ kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);
- if (max_kni_ifaces == 0) {
- RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
- max_kni_ifaces);
- RTE_LOG(ERR, KNI, "Unable to initialize KNI\n");
- return;
- }
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
+ kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);
- /* Check FD and open */
- if (kni_fd < 0) {
- kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
- if (kni_fd < 0) {
- RTE_LOG(ERR, KNI,
- "Can not open /dev/%s\n", KNI_DEVICE);
- return;
- }
- }
-
- /* Allocate slot objects */
- kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
- rte_malloc(NULL,
- sizeof(struct rte_kni_memzone_slot) *
- max_kni_ifaces,
- 0);
- KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
-
- /* Initialize general pool variables */
- kni_memzone_pool.initialized = 1;
- kni_memzone_pool.max_ifaces = max_kni_ifaces;
- kni_memzone_pool.free = &kni_memzone_pool.slots[0];
- rte_spinlock_init(&kni_memzone_pool.mutex);
-
- /* Pre-allocate all memzones of all the slots; panic on error */
- for (i = 0; i < max_kni_ifaces; i++) {
-
- /* Recover current slot */
- it = &kni_memzone_pool.slots[i];
- it->id = i;
-
- /* Allocate KNI context */
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
- mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_ctx = mz;
-
- /* TX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_tx_q = mz;
-
- /* RX RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_rx_q = mz;
-
- /* ALLOC RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_alloc_q = mz;
-
- /* FREE RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_free_q = mz;
-
- /* Request RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_req_q = mz;
-
- /* Response RING */
- snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_resp_q = mz;
-
- /* Req/Resp sync mem area */
- snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
- mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
- SOCKET_ID_ANY, 0);
- KNI_MEM_CHECK(mz == NULL);
- it->m_sync_addr = mz;
-
- if ((i+1) == max_kni_ifaces) {
- it->next = NULL;
- kni_memzone_pool.free_tail = it;
- } else
- it->next = &kni_memzone_pool.slots[i+1];
- }
-
- return;
+ return 0;
-kni_fail:
- RTE_LOG(ERR, KNI, "Unable to allocate memory for max_kni_ifaces:%d."
- "Increase the amount of hugepages memory\n", max_kni_ifaces);
+sync_addr_fail:
+ rte_memzone_free(kni->m_resp_q);
+resp_q_fail:
+ rte_memzone_free(kni->m_req_q);
+req_q_fail:
+ rte_memzone_free(kni->m_free_q);
+free_q_fail:
+ rte_memzone_free(kni->m_alloc_q);
+alloc_q_fail:
+ rte_memzone_free(kni->m_rx_q);
+rx_q_fail:
+ rte_memzone_free(kni->m_tx_q);
+tx_q_fail:
+ return -1;
}
+static void
+kni_release_mz(struct rte_kni *kni)
+{
+ rte_memzone_free(kni->m_tx_q);
+ rte_memzone_free(kni->m_rx_q);
+ rte_memzone_free(kni->m_alloc_q);
+ rte_memzone_free(kni->m_free_q);
+ rte_memzone_free(kni->m_req_q);
+ rte_memzone_free(kni->m_resp_q);
+ rte_memzone_free(kni->m_sync_addr);
+}
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
@@ -294,41 +201,45 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
{
int ret;
struct rte_kni_device_info dev_info;
- struct rte_kni *ctx;
- char intf_name[RTE_KNI_NAMESIZE];
- const struct rte_memzone *mz;
- struct rte_kni_memzone_slot *slot = NULL;
+ struct rte_kni *kni;
+ struct rte_tailq_entry *te;
+ struct rte_kni_list *kni_list;
if (!pktmbuf_pool || !conf || !conf->name[0])
return NULL;
/* Check if KNI subsystem has been initialized */
- if (kni_memzone_pool.initialized != 1) {
+ if (kni_fd < 0) {
RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
return NULL;
}
- /* Get an available slot from the pool */
- slot = kni_memzone_pool_alloc();
- if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
- kni_memzone_pool.max_ifaces);
- return NULL;
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ kni = __rte_kni_get(conf->name);
+ if (kni != NULL) {
+ RTE_LOG(ERR, KNI, "KNI already exists\n");
+ goto unlock;
}
- /* Recover ctx */
- ctx = slot->m_ctx->addr;
- snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
+ te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n");
+ goto unlock;
+ }
- if (ctx->in_use) {
- RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
- return NULL;
+ kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);
+ if (kni == NULL) {
+ RTE_LOG(ERR, KNI, "KNI memory allocation failed\n");
+ goto kni_fail;
}
- memset(ctx, 0, sizeof(struct rte_kni));
+
+ snprintf(kni->name, RTE_KNI_NAMESIZE, "%s", conf->name);
+
if (ops)
- memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
+ memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
else
- ctx->ops.port_id = UINT16_MAX;
+ kni->ops.port_id = UINT16_MAX;
memset(&dev_info, 0, sizeof(dev_info));
dev_info.bus = conf->addr.bus;
@@ -344,72 +255,79 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
- snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
- snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);
+ snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", conf->name);
RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
dev_info.bus, dev_info.devid, dev_info.function,
dev_info.vendor_id, dev_info.device_id);
+
+ ret = kni_reserve_mz(kni);
+ if (ret < 0)
+ goto mz_fail;
+
/* TX RING */
- mz = slot->m_tx_q;
- ctx->tx_q = mz->addr;
- kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
- dev_info.tx_phys = mz->phys_addr;
+ kni->tx_q = kni->m_tx_q->addr;
+ kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
+ dev_info.tx_phys = kni->m_tx_q->phys_addr;
/* RX RING */
- mz = slot->m_rx_q;
- ctx->rx_q = mz->addr;
- kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
- dev_info.rx_phys = mz->phys_addr;
+ kni->rx_q = kni->m_rx_q->addr;
+ kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
+ dev_info.rx_phys = kni->m_rx_q->phys_addr;
/* ALLOC RING */
- mz = slot->m_alloc_q;
- ctx->alloc_q = mz->addr;
- kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
- dev_info.alloc_phys = mz->phys_addr;
+ kni->alloc_q = kni->m_alloc_q->addr;
+ kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
+ dev_info.alloc_phys = kni->m_alloc_q->phys_addr;
/* FREE RING */
- mz = slot->m_free_q;
- ctx->free_q = mz->addr;
- kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
- dev_info.free_phys = mz->phys_addr;
+ kni->free_q = kni->m_free_q->addr;
+ kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
+ dev_info.free_phys = kni->m_free_q->phys_addr;
/* Request RING */
- mz = slot->m_req_q;
- ctx->req_q = mz->addr;
- kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
- dev_info.req_phys = mz->phys_addr;
+ kni->req_q = kni->m_req_q->addr;
+ kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
+ dev_info.req_phys = kni->m_req_q->phys_addr;
/* Response RING */
- mz = slot->m_resp_q;
- ctx->resp_q = mz->addr;
- kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
- dev_info.resp_phys = mz->phys_addr;
+ kni->resp_q = kni->m_resp_q->addr;
+ kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
+ dev_info.resp_phys = kni->m_resp_q->phys_addr;
/* Req/Resp sync mem area */
- mz = slot->m_sync_addr;
- ctx->sync_addr = mz->addr;
- dev_info.sync_va = mz->addr;
- dev_info.sync_phys = mz->phys_addr;
+ kni->sync_addr = kni->m_sync_addr->addr;
+ dev_info.sync_va = kni->m_sync_addr->addr;
+ dev_info.sync_phys = kni->m_sync_addr->phys_addr;
- ctx->pktmbuf_pool = pktmbuf_pool;
- ctx->group_id = conf->group_id;
- ctx->slot_id = slot->id;
- ctx->mbuf_size = conf->mbuf_size;
+ kni->pktmbuf_pool = pktmbuf_pool;
+ kni->group_id = conf->group_id;
+ kni->mbuf_size = conf->mbuf_size;
ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
- KNI_MEM_CHECK(ret < 0);
+ if (ret < 0)
+ goto ioctl_fail;
- ctx->in_use = 1;
+ te->data = kni;
+
+ kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
+ TAILQ_INSERT_TAIL(kni_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
/* Allocate mbufs and then put them into alloc_q */
- kni_allocate_mbufs(ctx);
+ kni_allocate_mbufs(kni);
- return ctx;
+ return kni;
+ioctl_fail:
+ kni_release_mz(kni);
+mz_fail:
+ rte_free(kni);
kni_fail:
- if (slot)
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);
+ rte_free(te);
+unlock:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
return NULL;
}
@@ -462,19 +380,36 @@ kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
int
rte_kni_release(struct rte_kni *kni)
{
+ struct rte_tailq_entry *te;
+ struct rte_kni_list *kni_list;
struct rte_kni_device_info dev_info;
- uint32_t slot_id;
uint32_t retry = 5;
- if (!kni || !kni->in_use)
+ if (!kni)
return -1;
+ kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, kni_list, next) {
+ if (te->data == kni)
+ break;
+ }
+
+ if (te == NULL)
+ goto unlock;
+
snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
RTE_LOG(ERR, KNI, "Fail to release kni device\n");
- return -1;
+ goto unlock;
}
+ TAILQ_REMOVE(kni_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
/* mbufs in all fifo should be released, except request/response */
/* wait until all rxq packets processed by kernel */
@@ -488,20 +423,18 @@ rte_kni_release(struct rte_kni *kni)
kni_free_fifo(kni->tx_q);
kni_free_fifo(kni->free_q);
- slot_id = kni->slot_id;
+ kni_release_mz(kni);
- /* Memset the KNI struct */
- memset(kni, 0, sizeof(struct rte_kni));
+ rte_free(kni);
- /* Release memzone */
- if (slot_id > kni_memzone_pool.max_ifaces) {
- RTE_LOG(ERR, KNI, "KNI pool: corrupted slot ID: %d, max: %d\n",
- slot_id, kni_memzone_pool.max_ifaces);
- return -1;
- }
- kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
+ rte_free(te);
return 0;
+
+unlock:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return -1;
}
/* default callback for request of configuring device mac address */
@@ -711,21 +644,18 @@ kni_allocate_mbufs(struct rte_kni *kni)
struct rte_kni *
rte_kni_get(const char *name)
{
- uint32_t i;
- struct rte_kni_memzone_slot *it;
struct rte_kni *kni;
- /* Note: could be improved perf-wise if necessary */
- for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
- it = &kni_memzone_pool.slots[i];
- if (it->in_use == 0)
- continue;
- kni = it->m_ctx->addr;
- if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
- return kni;
- }
+ if (!name || !name[0])
+ return NULL;
- return NULL;
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ kni = __rte_kni_get(name);
+
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return kni;
}
const char *
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 99055e2c2..601abdfc6 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -81,8 +81,12 @@ struct rte_kni_conf {
*
* @param max_kni_ifaces
* The maximum number of KNI interfaces that can coexist concurrently
+ *
+ * @return
+ * - 0 indicates success.
+ * - negative value indicates failure.
*/
-void rte_kni_init(unsigned int max_kni_ifaces);
+int rte_kni_init(unsigned int max_kni_ifaces);
/**
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index 1b876719a..56c98513a 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
}
test_kni_ctx = NULL;
- /* test of releasing a released kni device */
- if (rte_kni_release(kni) == 0) {
- printf("should not release a released kni device\n");
- return -1;
- }
-
/* test of reusing memzone */
kni = rte_kni_alloc(mp, &conf, &ops);
if (!kni) {
--
2.19.0
* Re: [dpdk-dev] [PATCH v3] kni: dynamically allocate memory for each KNI
2018-09-26 16:21 ` [dpdk-dev] [PATCH v3] " Igor Ryzhov
@ 2018-09-27 19:20 ` Ferruh Yigit
2018-10-02 13:06 ` Thomas Monjalon
2018-10-02 15:29 ` Thomas Monjalon
2 siblings, 0 replies; 13+ messages in thread
From: Ferruh Yigit @ 2018-09-27 19:20 UTC (permalink / raw)
To: Igor Ryzhov, dev
On 9/26/2018 5:21 PM, Igor Ryzhov wrote:
> Long time ago preallocation of memory for KNI was introduced in commit
> 0c6bc8e. It was done because of lack of ability to free previously
> allocated memzones, which led to memzone exhaustion. Currently memzones
> can be freed and this patch uses this ability for dynamic KNI memory
> allocation.
>
> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
* Re: [dpdk-dev] [PATCH v3] kni: dynamically allocate memory for each KNI
2018-09-26 16:21 ` [dpdk-dev] [PATCH v3] " Igor Ryzhov
2018-09-27 19:20 ` Ferruh Yigit
@ 2018-10-02 13:06 ` Thomas Monjalon
2018-10-02 13:27 ` Ferruh Yigit
2018-10-02 15:29 ` Thomas Monjalon
2 siblings, 1 reply; 13+ messages in thread
From: Thomas Monjalon @ 2018-10-02 13:06 UTC (permalink / raw)
To: Igor Ryzhov, ferruh.yigit; +Cc: dev
Hi,
26/09/2018 18:21, Igor Ryzhov:
> Long time ago preallocation of memory for KNI was introduced in commit
> 0c6bc8e. It was done because of lack of ability to free previously
> allocated memzones, which led to memzone exhaustion. Currently memzones
> can be freed and this patch uses this ability for dynamic KNI memory
> allocation.
>
> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
The patch does not apply on master.
Is there a dependency?
* Re: [dpdk-dev] [PATCH v3] kni: dynamically allocate memory for each KNI
2018-10-02 13:06 ` Thomas Monjalon
@ 2018-10-02 13:27 ` Ferruh Yigit
2018-10-02 13:27 ` Ferruh Yigit
0 siblings, 1 reply; 13+ messages in thread
From: Ferruh Yigit @ 2018-10-02 13:27 UTC (permalink / raw)
To: Thomas Monjalon, Igor Ryzhov; +Cc: dev
On 10/2/2018 2:06 PM, Thomas Monjalon wrote:
> Hi,
>
> 26/09/2018 18:21, Igor Ryzhov:
>> Long time ago preallocation of memory for KNI was introduced in commit
>> 0c6bc8e. It was done because of lack of ability to free previously
>> allocated memzones, which led to memzone exhaustion. Currently memzones
>> can be freed and this patch uses this ability for dynamic KNI memory
>> allocation.
>>
>> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
>
> The patch does not apply on master.
> Is there a dependency?
There should not be a conflict, following commit already in tree is causing the
conflict, I was resolving it myself when testing:
Commit e716b639856c ("kni: fix crash with null name")
Basically both do same thing:
<<<<<<< IN REPO
if (name == NULL || name[0] == '\0')
return NULL;
=======
if (!name || !name[0])
return NULL;
>>>>>>> PATCH
It seems Igor has an older version of the tree. It is OK to keep the check
existing in the repo.
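I.e. the resolved hunk simply keeps the lines already in the repo (sketch of
the resolution, not the exact surrounding context):

        if (name == NULL || name[0] == '\0')
                return NULL;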
* Re: [dpdk-dev] [PATCH v3] kni: dynamically allocate memory for each KNI
2018-10-02 13:27 ` Ferruh Yigit
@ 2018-10-02 13:27 ` Ferruh Yigit
0 siblings, 0 replies; 13+ messages in thread
From: Ferruh Yigit @ 2018-10-02 13:27 UTC (permalink / raw)
To: Thomas Monjalon, Igor Ryzhov; +Cc: dev
On 10/2/2018 2:27 PM, Ferruh Yigit wrote:
> On 10/2/2018 2:06 PM, Thomas Monjalon wrote:
>> Hi,
>>
>> 26/09/2018 18:21, Igor Ryzhov:
>>> Long time ago preallocation of memory for KNI was introduced in commit
>>> 0c6bc8e. It was done because of lack of ability to free previously
>>> allocated memzones, which led to memzone exhaustion. Currently memzones
>>> can be freed and this patch uses this ability for dynamic KNI memory
>>> allocation.
>>>
>>> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
>>
>> The patch does not apply on master.
>> Is there a dependency?
>
> There should not be a conflict, following commit already in tree is causing the
There should not be a _dependency_, ...
> conflict, I was resolving it myself when testing:
> Commit e716b639856c ("kni: fix crash with null name")
>
> Basically both do same thing:
> <<<<<<< IN REPO
> if (name == NULL || name[0] == '\0')
> return NULL;
> =======
> if (!name || !name[0])
> return NULL;
> >>>>>>> PATCH
>
>
> It seems Igor has an older version of the tree. It is OK to keep the check
> existing in the repo.
>
* Re: [dpdk-dev] [PATCH v3] kni: dynamically allocate memory for each KNI
2018-09-26 16:21 ` [dpdk-dev] [PATCH v3] " Igor Ryzhov
2018-09-27 19:20 ` Ferruh Yigit
2018-10-02 13:06 ` Thomas Monjalon
@ 2018-10-02 15:29 ` Thomas Monjalon
2 siblings, 0 replies; 13+ messages in thread
From: Thomas Monjalon @ 2018-10-02 15:29 UTC (permalink / raw)
To: Igor Ryzhov; +Cc: dev, ferruh.yigit
26/09/2018 18:21, Igor Ryzhov:
> Long time ago preallocation of memory for KNI was introduced in commit
> 0c6bc8e. It was done because of lack of ability to free previously
> allocated memzones, which led to memzone exhaustion. Currently memzones
> can be freed and this patch uses this ability for dynamic KNI memory
> allocation.
>
> Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
Applied (with conflict solved), thanks
Thread overview: 13+ messages
2018-08-02 14:25 [dpdk-dev] [PATCH] kni: dynamically allocate memory for each KNI Igor Ryzhov
2018-08-27 17:06 ` Ferruh Yigit
2018-08-29 9:52 ` Igor Ryzhov
2018-08-30 10:55 ` Ferruh Yigit
2018-09-23 19:12 ` [dpdk-dev] [PATCH v2] " Igor Ryzhov
2018-09-26 10:41 ` Ferruh Yigit
2018-09-26 13:31 ` Igor Ryzhov
2018-09-26 16:21 ` [dpdk-dev] [PATCH v3] " Igor Ryzhov
2018-09-27 19:20 ` Ferruh Yigit
2018-10-02 13:06 ` Thomas Monjalon
2018-10-02 13:27 ` Ferruh Yigit
2018-10-02 13:27 ` Ferruh Yigit
2018-10-02 15:29 ` Thomas Monjalon