From mboxrd@z Thu Jan 1 00:00:00 1970
From: Chengwen Feng
Date: Sat, 9 Oct 2021 17:33:38 +0800
Message-ID: <20211009093340.43237-5-fengchengwen@huawei.com>
X-Mailer: git-send-email 2.33.0
In-Reply-To: <20211009093340.43237-1-fengchengwen@huawei.com>
References: <1625231891-2963-1-git-send-email-fengchengwen@huawei.com>
 <20211009093340.43237-1-fengchengwen@huawei.com>
Subject: [dpdk-dev] [PATCH v24 4/6] dmadev: add multi-process support

This patch adds multi-process support for dmadev. Device data that must be
visible to every process is kept in a per-device rte_dma_dev_data entry
inside a shared memzone; the primary process creates this memzone and
secondary processes look it up and attach to a device by its name.

Signed-off-by: Chengwen Feng
Acked-by: Bruce Richardson
Acked-by: Morten Brørup
Reviewed-by: Kevin Laatz
Reviewed-by: Conor Walsh
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 176 ++++++++++++++++++++-----
 lib/dmadev/rte_dmadev_pmd.h            |  29 +++-
 3 files changed, 163 insertions(+), 43 deletions(-)
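Usage sketch (illustration only, not part of the diff below): with the shared
rte_dma_dev_data table in place, a secondary process can reach a device that
the primary process has already probed, configured and started purely by its
name. The device name "dma0" and vchan 0 are made-up example values; the calls
used are the existing public dmadev API.

#include <stdbool.h>
#include <stdint.h>
#include <rte_dmadev.h>

/* Runs in a secondary process after rte_eal_init(). */
static int
copy_in_secondary(rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx = 0;
	bool has_error = false;
	int dev_id;

	/* Lookup by name works because the name lives in the shared
	 * rte_dma_dev_data entry written by the primary process.
	 */
	dev_id = rte_dma_get_dev_id_by_name("dma0");
	if (dev_id < 0)
		return dev_id;

	/* The data path is unchanged; vchan 0 must have been set up by the
	 * primary before rte_dma_start().
	 */
	if (rte_dma_copy(dev_id, 0, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT) < 0)
		return -1;

	while (rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
		;

	return has_error ? -1 : 0;
}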
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d1d7abf694..af32fce1ed 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -145,6 +145,7 @@ New Features
 
   * Device allocation functions.
   * Control and data plane API.
+  * Multi-process support.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 891ceeb988..2dba676b2b 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -19,6 +19,13 @@ static int16_t dma_devices_max;
 
 struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
+static struct {
+	/* Hold the dev_max information of the primary process. This field is
+	 * set by the primary process and is read by the secondary process.
+	 */
+	int16_t dev_max;
+	struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
 #define RTE_DMA_LOG(level, ...) \
@@ -70,11 +77,11 @@ dma_find_free_id(void)
 {
 	int16_t i;
 
-	if (rte_dma_devices == NULL)
+	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
 		return -1;
 
 	for (i = 0; i < dma_devices_max; i++) {
-		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
 			return i;
 	}
 
@@ -91,7 +98,7 @@ dma_find_by_name(const char *name)
 
 	for (i = 0; i < dma_devices_max; i++) {
 		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
-		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
 			return &rte_dma_devices[i];
 	}
 
@@ -147,23 +154,71 @@ dma_dev_data_prepare(void)
 	return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
 	int ret;
 
-	if (dma_devices_max == 0)
-		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
-
-	ret = dma_fp_data_prepare();
-	if (ret)
-		return ret;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+	}
 
-	return dma_dev_data_prepare();
+	return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
 	struct rte_dma_dev *dev;
 	void *dev_private;
@@ -197,10 +252,54 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	}
 
 	dev = &rte_dma_devices[dev_id];
-	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-	dev->dev_id = dev_id;
-	dev->numa_node = numa_node;
-	dev->dev_private = dev_private;
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
 
 	return dev;
 }
@@ -208,7 +307,11 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-	rte_free(dev->dev_private);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+	}
+
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -242,7 +345,7 @@ rte_dma_pmd_release(const char *name)
 		return -EINVAL;
 
 	if (dev->state == RTE_DMA_DEV_READY)
-		return rte_dma_close(dev->dev_id);
+		return rte_dma_close(dev->data->dev_id);
 
 	dma_release(dev);
 	return 0;
@@ -260,7 +363,7 @@ rte_dma_get_dev_id_by_name(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
-	return dev->dev_id;
+	return dev->data->dev_id;
 }
 
 bool
@@ -305,7 +408,7 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
 		return ret;
 
 	dev_info->numa_node = dev->device->numa_node;
-	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
 
 	return 0;
 }
@@ -320,7 +423,7 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -352,7 +455,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
 					     sizeof(struct rte_dma_conf));
 	if (ret == 0)
-		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+		memcpy(&dev->data->dev_conf, dev_conf,
+			sizeof(struct rte_dma_conf));
 
 	return ret;
 }
@@ -368,12 +472,12 @@ rte_dma_start(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
 		return 0;
 	}
@@ -387,7 +491,7 @@ rte_dma_start(int16_t dev_id)
 
 mark_started:
 	dma_fp_object_setup(dev_id, dev);
-	dev->dev_started = 1;
+	dev->data->dev_started = 1;
 	return 0;
 }
 
@@ -400,7 +504,7 @@ rte_dma_stop(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_started == 0) {
+	if (dev->data->dev_started == 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
 		return 0;
 	}
@@ -414,7 +518,7 @@ rte_dma_stop(int16_t dev_id)
 
 mark_stopped:
 	dma_fp_object_reset(dev_id);
-	dev->dev_started = 0;
+	dev->data->dev_started = 0;
 	return 0;
 }
 
@@ -428,7 +532,7 @@ rte_dma_close(int16_t dev_id)
 		return -EINVAL;
 
 	/* Device must be stopped before it can be closed */
-	if (dev->dev_started == 1) {
+	if (dev->data->dev_started == 1) {
 		RTE_DMA_LOG(ERR, "Device %d must be stopped before closing",
 			dev_id);
 		return -EBUSY;
@@ -454,7 +558,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 	if (!rte_dma_is_valid(dev_id) || conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -466,7 +570,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
 		return -EINVAL;
 	}
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
@@ -540,7 +644,7 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
 	if (!rte_dma_is_valid(dev_id) || stats == NULL)
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -561,7 +665,7 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -634,14 +738,14 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 	}
 
 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
-		dev->dev_id,
-		dev->dev_name,
-		dev->dev_started ? "started" : "stopped");
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
 	dma_dump_capability(f, dev_info.dev_capa);
 	(void)fprintf(f, " max_vchans_supported: %u\n", dev_info.max_vchans);
 	(void)fprintf(f, " nb_vchans_configured: %u\n", dev_info.nb_vchans);
 	(void)fprintf(f, " silent_mode: %s\n",
-		dev->dev_conf.enable_silent ? "on" : "off");
+		dev->data->dev_conf.enable_silent ? "on" : "off");
 
 	if (dev->dev_ops->dev_dump != NULL)
 		return (*dev->dev_ops->dev_dump)(dev, f);
@@ -724,7 +828,7 @@ dma_fp_object_setup(int16_t dev_id, const struct rte_dma_dev *dev)
 {
 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
 
-	obj->dev_private = dev->dev_private;
+	obj->dev_private = dev->data->dev_private;
 	if (dev->dev_ops->copy)
 		obj->copy = dev->dev_ops->copy;
 	if (dev->dev_ops->copy_sg)
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index 07056b45e7..c2902eddd9 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -83,6 +83,27 @@ struct rte_dma_dev_ops {
 	rte_dma_completed_t completed;
 	rte_dma_completed_status_t completed_status;
 };
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ *
+ * @see struct rte_dma_dev::data
+ */
+struct rte_dma_dev_data {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
 /**
  * Possible states of a DMA device.
  *
@@ -101,18 +122,12 @@ enum rte_dma_dev_state {
  * The generic data structure associated with each DMA device.
  */
 struct rte_dma_dev {
-	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
-	int16_t dev_id; /**< Device [external] identifier. */
-	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
-	void *dev_private; /**< PMD-specific private data. */
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
+	struct rte_dma_dev_data *data; /**< Pointer to shared device data. */
 	/** Functions implemented by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
-	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
-	__extension__
-	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
-- 
2.33.0
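Driver-side illustration (hypothetical, not part of the patch): a PMD needs no
process-type checks of its own, because rte_dma_pmd_allocate() ends up in
dma_allocate(), which either creates the shared rte_dma_dev_data entry (in the
primary process) or attaches to the entry published by the primary (in a
secondary process). The driver name, ops table and private structure below are
invented for the example.

#include <errno.h>
#include <rte_dmadev_pmd.h>

struct mydma_private {
	int dummy; /* made-up per-device private data */
};

/* Empty ops table, only to keep the sketch self-contained. */
static const struct rte_dma_dev_ops mydma_ops;

static int
mydma_probe(const char *name, int numa_node)
{
	struct rte_dma_dev *dev;

	dev = rte_dma_pmd_allocate(name, numa_node,
				   sizeof(struct mydma_private));
	if (dev == NULL)
		return -ENOMEM;

	/* dev->data points into the shared memzone. In a secondary process
	 * dev->data->dev_private already holds the pointer stored by the
	 * primary, so only the per-process function pointers are set here.
	 */
	dev->dev_ops = &mydma_ops;
	dev->state = RTE_DMA_DEV_READY;

	return 0;
}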